2024-12-08 05:48:40,857 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-08 05:48:40,897 main DEBUG Took 0.037389 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 05:48:40,898 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 05:48:40,898 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 05:48:40,899 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 05:48:40,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,942 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 05:48:40,978 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,980 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,980 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,981 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,982 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,982 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,986 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,986 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,987 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,987 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,989 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,989 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,990 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 05:48:40,991 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,994 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,994 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,995 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,995 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,996 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,997 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,997 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 05:48:40,998 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:40,998 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 05:48:41,000 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 05:48:41,002 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 05:48:41,005 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 05:48:41,005 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 05:48:41,007 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 05:48:41,007 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 05:48:41,019 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 05:48:41,022 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 05:48:41,025 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 05:48:41,025 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 05:48:41,026 main DEBUG createAppenders(={Console}) 2024-12-08 05:48:41,027 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-08 05:48:41,027 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-08 05:48:41,027 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-08 05:48:41,028 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 05:48:41,029 main DEBUG OutputStream closed 2024-12-08 05:48:41,029 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 05:48:41,029 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 05:48:41,030 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-08 05:48:41,110 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 05:48:41,113 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 05:48:41,114 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 05:48:41,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 05:48:41,116 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 05:48:41,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 05:48:41,117 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 05:48:41,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 05:48:41,118 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 05:48:41,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 05:48:41,119 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 05:48:41,119 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 05:48:41,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 05:48:41,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 05:48:41,121 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 05:48:41,121 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 05:48:41,122 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 05:48:41,123 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 05:48:41,125 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 05:48:41,125 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-08 05:48:41,126 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 05:48:41,126 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-08T05:48:41,490 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99 2024-12-08 05:48:41,495 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 05:48:41,495 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-08T05:48:41,508 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-08T05:48:41,552 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=271, ProcessCount=12, AvailableMemoryMB=8549 2024-12-08T05:48:41,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:48:41,577 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9, deleteOnExit=true 2024-12-08T05:48:41,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:48:41,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/test.cache.data in system properties and HBase conf 2024-12-08T05:48:41,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:48:41,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:48:41,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:48:41,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:48:41,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:48:41,681 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T05:48:41,804 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:48:41,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:48:41,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:48:41,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:48:41,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:48:41,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:48:41,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:48:41,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:48:41,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:48:41,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:48:41,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:48:41,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:48:41,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:48:41,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:48:41,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:48:42,369 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:48:42,700 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T05:48:42,772 INFO [Time-limited test {}] log.Log(170): Logging initialized @2955ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T05:48:42,842 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:48:42,900 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:48:42,919 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:48:42,919 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:48:42,920 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:48:42,933 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:48:42,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:48:42,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:48:43,122 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f961078{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/java.io.tmpdir/jetty-localhost-42343-hadoop-hdfs-3_4_1-tests_jar-_-any-132903998525731804/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:48:43,129 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:42343} 2024-12-08T05:48:43,129 INFO [Time-limited test {}] server.Server(415): Started @3313ms 2024-12-08T05:48:43,154 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:48:43,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:48:43,510 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:48:43,511 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:48:43,512 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:48:43,512 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:48:43,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:48:43,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:48:43,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32c41a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/java.io.tmpdir/jetty-localhost-45989-hadoop-hdfs-3_4_1-tests_jar-_-any-7895431553129806446/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:48:43,635 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:45989} 2024-12-08T05:48:43,635 INFO [Time-limited test {}] server.Server(415): Started @3819ms 2024-12-08T05:48:43,692 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:48:43,816 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:48:43,822 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:48:43,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:48:43,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:48:43,824 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:48:43,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:48:43,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:48:43,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78be0d39{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/java.io.tmpdir/jetty-localhost-36859-hadoop-hdfs-3_4_1-tests_jar-_-any-12050877659750843993/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:48:43,948 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:36859} 2024-12-08T05:48:43,948 INFO [Time-limited test {}] server.Server(415): Started @4132ms 2024-12-08T05:48:43,950 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:48:44,137 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data3/current/BP-1789351033-172.17.0.2-1733636922461/current, will proceed with Du for space computation calculation, 2024-12-08T05:48:44,137 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data2/current/BP-1789351033-172.17.0.2-1733636922461/current, will proceed with Du for space computation calculation, 2024-12-08T05:48:44,137 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data4/current/BP-1789351033-172.17.0.2-1733636922461/current, will proceed with Du for space computation calculation, 2024-12-08T05:48:44,137 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data1/current/BP-1789351033-172.17.0.2-1733636922461/current, will proceed with Du for space computation calculation, 2024-12-08T05:48:44,197 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:48:44,197 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:48:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5635a3712fd2bdf1 with lease ID 0x3a52221c6ec83a63: Processing first storage report for DS-cf3b8273-3ba7-4eff-9a12-93a646be6713 from datanode DatanodeRegistration(127.0.0.1:36757, datanodeUuid=be95e663-f2a9-4e51-ba7d-6dde5fb2ed73, infoPort=37471, infoSecurePort=0, ipcPort=43783, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461) 2024-12-08T05:48:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5635a3712fd2bdf1 with lease ID 0x3a52221c6ec83a63: from storage DS-cf3b8273-3ba7-4eff-9a12-93a646be6713 node DatanodeRegistration(127.0.0.1:36757, datanodeUuid=be95e663-f2a9-4e51-ba7d-6dde5fb2ed73, infoPort=37471, infoSecurePort=0, ipcPort=43783, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:48:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9de220d370d18d2 with lease ID 0x3a52221c6ec83a64: Processing first storage report for DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2 from datanode DatanodeRegistration(127.0.0.1:45301, datanodeUuid=6ccd3602-0229-4e3b-96de-8d70753dc2d3, infoPort=42719, infoSecurePort=0, ipcPort=39107, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461) 2024-12-08T05:48:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe9de220d370d18d2 with lease ID 0x3a52221c6ec83a64: from storage DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2 node DatanodeRegistration(127.0.0.1:45301, datanodeUuid=6ccd3602-0229-4e3b-96de-8d70753dc2d3, infoPort=42719, infoSecurePort=0, ipcPort=39107, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:48:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5635a3712fd2bdf1 with lease ID 0x3a52221c6ec83a63: Processing first storage report for DS-336f8240-5051-4c22-bc94-b96d9b8ed21a from datanode DatanodeRegistration(127.0.0.1:36757, datanodeUuid=be95e663-f2a9-4e51-ba7d-6dde5fb2ed73, infoPort=37471, infoSecurePort=0, ipcPort=43783, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461) 2024-12-08T05:48:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5635a3712fd2bdf1 with lease ID 0x3a52221c6ec83a63: from storage DS-336f8240-5051-4c22-bc94-b96d9b8ed21a node DatanodeRegistration(127.0.0.1:36757, datanodeUuid=be95e663-f2a9-4e51-ba7d-6dde5fb2ed73, infoPort=37471, infoSecurePort=0, ipcPort=43783, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:48:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe9de220d370d18d2 with lease ID 0x3a52221c6ec83a64: Processing first storage report for DS-18b8bcd4-d862-4415-ada1-f4cf30bd10f2 from datanode DatanodeRegistration(127.0.0.1:45301, datanodeUuid=6ccd3602-0229-4e3b-96de-8d70753dc2d3, infoPort=42719, infoSecurePort=0, ipcPort=39107, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461) 2024-12-08T05:48:44,278 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xe9de220d370d18d2 with lease ID 0x3a52221c6ec83a64: from storage DS-18b8bcd4-d862-4415-ada1-f4cf30bd10f2 node DatanodeRegistration(127.0.0.1:45301, datanodeUuid=6ccd3602-0229-4e3b-96de-8d70753dc2d3, infoPort=42719, infoSecurePort=0, ipcPort=39107, storageInfo=lv=-57;cid=testClusterID;nsid=332181314;c=1733636922461), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:48:44,350 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99 2024-12-08T05:48:44,426 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/zookeeper_0, clientPort=49643, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:48:44,437 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49643 2024-12-08T05:48:44,446 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:44,448 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:44,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:48:44,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:48:45,103 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976 with version=8 2024-12-08T05:48:45,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:48:45,193 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T05:48:45,442 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:48:45,454 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,459 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:48:45,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:48:45,593 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:48:45,652 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T05:48:45,661 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T05:48:45,665 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:48:45,691 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 32741 (auto-detected) 2024-12-08T05:48:45,692 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T05:48:45,711 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39457 2024-12-08T05:48:45,732 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39457 connecting to ZooKeeper ensemble=127.0.0.1:49643 2024-12-08T05:48:45,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:394570x0, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:48:45,767 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39457-0x101909e0e880000 connected 2024-12-08T05:48:45,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:45,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:45,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:48:45,819 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976, hbase.cluster.distributed=false 2024-12-08T05:48:45,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:48:45,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39457 
2024-12-08T05:48:45,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39457 2024-12-08T05:48:45,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39457 2024-12-08T05:48:45,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39457 2024-12-08T05:48:45,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39457 2024-12-08T05:48:45,971 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:48:45,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,973 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,974 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:48:45,974 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:48:45,974 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:48:45,977 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:48:45,979 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:48:45,980 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42137 2024-12-08T05:48:45,982 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42137 connecting to ZooKeeper ensemble=127.0.0.1:49643 2024-12-08T05:48:45,983 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:45,988 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:45,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421370x0, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:48:45,996 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42137-0x101909e0e880001 connected 2024-12-08T05:48:45,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-08T05:48:46,000 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:48:46,009 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:48:46,012 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:48:46,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:48:46,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42137 2024-12-08T05:48:46,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42137 2024-12-08T05:48:46,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42137 2024-12-08T05:48:46,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42137 2024-12-08T05:48:46,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42137 2024-12-08T05:48:46,036 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:39457 2024-12-08T05:48:46,037 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:48:46,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:48:46,045 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:48:46,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,066 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:48:46,067 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,39457,1733636925244 from backup master directory 2024-12-08T05:48:46,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:48:46,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:48:46,072 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:48:46,072 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,074 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T05:48:46,076 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T05:48:46,136 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase.id] with ID: cc5863f6-eac3-4d5b-ae71-ae63c5ee94ad 2024-12-08T05:48:46,136 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/.tmp/hbase.id 2024-12-08T05:48:46,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:48:46,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:48:46,150 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/.tmp/hbase.id]:[hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase.id] 2024-12-08T05:48:46,192 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:46,197 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-08T05:48:46,216 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-08T05:48:46,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:48:46,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:48:46,253 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:48:46,256 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:48:46,262 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:48:46,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:48:46,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:48:46,318 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store 2024-12-08T05:48:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:48:46,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:48:46,348 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-08T05:48:46,351 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:46,353 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:48:46,353 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:48:46,353 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:48:46,355 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:48:46,355 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:48:46,355 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:48:46,357 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733636926353Disabling compacts and flushes for region at 1733636926353Disabling writes for close at 1733636926355 (+2 ms)Writing region close event to WAL at 1733636926355Closed at 1733636926355 2024-12-08T05:48:46,359 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/.initializing 2024-12-08T05:48:46,359 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/WALs/0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,383 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C39457%2C1733636925244, suffix=, logDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/WALs/0d942cb2025d,39457,1733636925244, archiveDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/oldWALs, maxLogs=10 2024-12-08T05:48:46,394 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C39457%2C1733636925244.1733636926389 2024-12-08T05:48:46,423 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/WALs/0d942cb2025d,39457,1733636925244/0d942cb2025d%2C39457%2C1733636925244.1733636926389 2024-12-08T05:48:46,443 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:48:46,445 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:48:46,445 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:46,449 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,450 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:48:46,529 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:46,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:48:46,537 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:48:46,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:48:46,541 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:48:46,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:48:46,545 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,545 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:48:46,546 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,549 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,550 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,555 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,556 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,559 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:48:46,563 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:48:46,568 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:48:46,570 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876931, jitterRate=0.11507594585418701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:48:46,575 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733636926464Initializing all the Stores at 1733636926467 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636926468 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636926468Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636926469 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636926469Cleaning up temporary data from old regions at 1733636926556 (+87 ms)Region opened successfully at 1733636926575 (+19 ms) 2024-12-08T05:48:46,577 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:48:46,612 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3bdd8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:48:46,649 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:48:46,660 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:48:46,661 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:48:46,664 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:48:46,665 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T05:48:46,670 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T05:48:46,670 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:48:46,699 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:48:46,711 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:48:46,714 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:48:46,716 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:48:46,718 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:48:46,720 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:48:46,722 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:48:46,726 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:48:46,729 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:48:46,731 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:48:46,732 
DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:48:46,750 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:48:46,752 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:48:46,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:48:46,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:48:46,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,760 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,39457,1733636925244, sessionid=0x101909e0e880000, setting cluster-up flag (Was=false) 2024-12-08T05:48:46,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,780 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:48:46,782 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:46,793 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:48:46,795 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39457,1733636925244 2024-12-08T05:48:46,801 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:48:46,824 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(746): ClusterId : cc5863f6-eac3-4d5b-ae71-ae63c5ee94ad 2024-12-08T05:48:46,827 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:48:46,832 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:48:46,833 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:48:46,835 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:48:46,836 DEBUG [RS:0;0d942cb2025d:42137 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4568e746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:48:46,854 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:42137 2024-12-08T05:48:46,857 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:48:46,857 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:48:46,858 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:48:46,860 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,39457,1733636925244 with port=42137, startcode=1733636925929 2024-12-08T05:48:46,871 DEBUG [RS:0;0d942cb2025d:42137 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:48:46,875 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:48:46,884 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:48:46,891 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-08T05:48:46,896 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,39457,1733636925244 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:48:46,903 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:46,904 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:48:46,904 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:46,906 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733636956906 2024-12-08T05:48:46,908 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:48:46,909 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:48:46,909 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:48:46,909 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:48:46,913 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:48:46,914 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:48:46,915 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:48:46,916 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:48:46,917 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,917 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:48:46,918 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-08T05:48:46,921 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:48:46,923 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:48:46,923 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:48:46,926 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:48:46,927 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:48:46,929 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733636926928,5,FailOnTimeoutGroup] 2024-12-08T05:48:46,930 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733636926929,5,FailOnTimeoutGroup] 2024-12-08T05:48:46,930 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:46,930 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:48:46,932 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:46,932 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T05:48:46,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:48:46,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:48:46,944 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:48:46,945 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976 2024-12-08T05:48:46,954 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56525, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:48:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:48:46,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:48:46,962 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39457 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,42137,1733636925929 2024-12-08T05:48:46,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:46,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:48:46,966 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39457 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:46,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:48:46,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:46,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:48:46,973 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:48:46,973 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:46,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:48:46,978 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:48:46,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:46,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:48:46,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:48:46,982 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:46,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:46,983 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:48:46,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740 2024-12-08T05:48:46,985 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976 2024-12-08T05:48:46,985 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45577 2024-12-08T05:48:46,985 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.master.info.port=-1 2024-12-08T05:48:46,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740 2024-12-08T05:48:46,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:48:46,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:48:46,990 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:48:46,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:48:46,991 DEBUG [RS:0;0d942cb2025d:42137 {}] zookeeper.ZKUtil(111): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,42137,1733636925929 2024-12-08T05:48:46,991 WARN [RS:0;0d942cb2025d:42137 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:48:46,991 INFO [RS:0;0d942cb2025d:42137 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:48:46,992 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929 2024-12-08T05:48:46,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:48:46,994 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,42137,1733636925929] 2024-12-08T05:48:47,000 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:48:47,001 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863109, jitterRate=0.09750030934810638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:48:47,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733636926963Initializing all the Stores at 1733636926965 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636926965Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636926965Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636926965Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636926965Cleaning up temporary data from old regions at 1733636926989 (+24 ms)Region opened successfully at 1733636927003 (+14 ms) 2024-12-08T05:48:47,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:48:47,003 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:48:47,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:48:47,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:48:47,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:48:47,005 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:48:47,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733636927003Disabling compacts and flushes for region at 1733636927003Disabling writes for close at 1733636927003Writing region close event to WAL at 1733636927004 (+1 ms)Closed at 1733636927005 (+1 ms) 2024-12-08T05:48:47,009 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:48:47,009 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:48:47,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:48:47,024 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:48:47,028 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:48:47,029 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:48:47,043 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:48:47,048 INFO 
[RS:0;0d942cb2025d:42137 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:48:47,048 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,049 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:48:47,055 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:48:47,056 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,057 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,057 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,057 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,057 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,058 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,058 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:48:47,058 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,058 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,059 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,059 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,059 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,059 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:48:47,060 DEBUG [RS:0;0d942cb2025d:42137 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:48:47,060 DEBUG [RS:0;0d942cb2025d:42137 {}] 
executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:48:47,061 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,061 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,061 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,062 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,062 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,062 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,42137,1733636925929-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:48:47,082 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:48:47,085 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,42137,1733636925929-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,086 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,086 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.Replication(171): 0d942cb2025d,42137,1733636925929 started 2024-12-08T05:48:47,106 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:48:47,106 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,42137,1733636925929, RpcServer on 0d942cb2025d/172.17.0.2:42137, sessionid=0x101909e0e880001 2024-12-08T05:48:47,107 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:48:47,107 DEBUG [RS:0;0d942cb2025d:42137 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,42137,1733636925929 2024-12-08T05:48:47,107 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,42137,1733636925929' 2024-12-08T05:48:47,108 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:48:47,109 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:48:47,117 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:48:47,117 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:48:47,117 DEBUG [RS:0;0d942cb2025d:42137 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,42137,1733636925929 2024-12-08T05:48:47,117 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,42137,1733636925929' 2024-12-08T05:48:47,117 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:48:47,118 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:48:47,119 DEBUG [RS:0;0d942cb2025d:42137 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:48:47,120 INFO [RS:0;0d942cb2025d:42137 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:48:47,120 INFO [RS:0;0d942cb2025d:42137 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:48:47,179 WARN [0d942cb2025d:39457 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-08T05:48:47,228 INFO [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C42137%2C1733636925929, suffix=, logDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929, archiveDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs, maxLogs=32 2024-12-08T05:48:47,230 INFO [RS:0;0d942cb2025d:42137 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636927230 2024-12-08T05:48:47,239 INFO [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636927230 2024-12-08T05:48:47,240 DEBUG [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:48:47,432 DEBUG [0d942cb2025d:39457 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:48:47,444 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:47,451 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,42137,1733636925929, state=OPENING 2024-12-08T05:48:47,456 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:48:47,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:47,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:48:47,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:48:47,459 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:48:47,460 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:48:47,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,42137,1733636925929}] 2024-12-08T05:48:47,641 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:48:47,644 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57685, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:48:47,655 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:48:47,655 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:48:47,659 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C42137%2C1733636925929.meta, suffix=.meta, logDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929, archiveDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs, maxLogs=32 2024-12-08T05:48:47,661 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.meta.1733636927660.meta 2024-12-08T05:48:47,669 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.meta.1733636927660.meta 2024-12-08T05:48:47,670 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:48:47,672 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:48:47,674 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:48:47,677 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:48:47,681 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
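The WALFactory line above shows this run instantiating FSHLogProvider, and both AbstractFSWAL configuration lines report blocksize=256 MB and rollsize=128 MB. A hedged configuration sketch of the knobs that typically produce those numbers; the property names are assumptions based on stock HBase defaults (the roll size is the block size times the roll multiplier), not something this log states explicitly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" maps to FSHLogProvider; "asyncfs" would select the async WAL instead.
        conf.set("hbase.wal.provider", "filesystem");
        // 256 MB WAL block size, as reported in the WAL configuration lines above.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll at half a block: 256 MB * 0.5 = 128 MB, matching rollsize=128 MB.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
      }
    }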
2024-12-08T05:48:47,686 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:48:47,686 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:47,687 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:48:47,687 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:48:47,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:48:47,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:48:47,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:47,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:47,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:48:47,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:48:47,695 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:47,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:47,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:48:47,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:48:47,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:47,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:48:47,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:48:47,700 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:48:47,700 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:47,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
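Each store opener above dumps the effective CompactionConfiguration: at least 3 and at most 10 files per minor compaction, a 1.2 selection ratio (5.0 off-peak), and ExploringCompactionPolicy. A minimal sketch of the configuration keys that usually drive those values; the key names are assumptions based on stock HBase, while the numbers are copied from the log lines above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.000000
        // minCompactSize:128 MB as printed above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        System.out.println("compaction ratio: " + conf.get("hbase.hstore.compaction.ratio"));
      }
    }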
2024-12-08T05:48:47,701 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:48:47,703 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740 2024-12-08T05:48:47,705 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740 2024-12-08T05:48:47,708 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:48:47,708 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:48:47,709 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:48:47,713 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:48:47,714 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878795, jitterRate=0.11744636297225952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:48:47,715 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:48:47,717 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733636927688Writing region info on filesystem at 1733636927688Initializing all the Stores at 1733636927690 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636927690Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636927691 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636927691Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733636927691Cleaning up temporary data from old regions at 1733636927708 (+17 ms)Running coprocessor post-open hooks at 1733636927715 (+7 ms)Region opened successfully at 1733636927717 (+2 ms) 2024-12-08T05:48:47,726 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733636927632 2024-12-08T05:48:47,741 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:48:47,742 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:48:47,743 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:47,745 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,42137,1733636925929, state=OPEN 2024-12-08T05:48:47,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:48:47,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:48:47,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:48:47,753 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:48:47,753 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:47,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:48:47,762 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,42137,1733636925929 in 291 msec 2024-12-08T05:48:47,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:48:47,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 749 msec 2024-12-08T05:48:47,772 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:48:47,772 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:48:47,793 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:48:47,795 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,42137,1733636925929, seqNum=-1] 2024-12-08T05:48:47,821 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:48:47,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60909, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:48:47,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0130 sec 2024-12-08T05:48:47,844 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733636927844, completionTime=-1 2024-12-08T05:48:47,847 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:48:47,848 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:48:47,875 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:48:47,875 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733636987875 2024-12-08T05:48:47,876 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637047876 2024-12-08T05:48:47,876 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-08T05:48:47,878 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,879 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,879 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,881 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:39457, period=300000, unit=MILLISECONDS is enabled. 
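InitMetaProcedure above creates the 'default' and 'hbase' namespaces before the master declares initialization complete. A small sketch of confirming that from a client, assuming the standard Admin namespace API; this is an illustration, not part of the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Expect at least "default" and "hbase", the namespaces created by InitMetaProcedure above.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }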
2024-12-08T05:48:47,881 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,882 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:48:47,894 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:48:47,914 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.841sec 2024-12-08T05:48:47,916 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:48:47,917 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:48:47,918 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:48:47,919 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:48:47,919 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:48:47,920 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:48:47,920 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:48:47,956 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:48:47,957 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:48:47,958 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39457,1733636925244-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:48:47,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c528490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:48:47,982 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T05:48:47,983 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T05:48:47,986 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,39457,-1 for getting cluster id 2024-12-08T05:48:47,989 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:48:47,999 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cc5863f6-eac3-4d5b-ae71-ae63c5ee94ad' 2024-12-08T05:48:48,002 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:48:48,002 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cc5863f6-eac3-4d5b-ae71-ae63c5ee94ad" 2024-12-08T05:48:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c30f713, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:48:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,39457,-1] 2024-12-08T05:48:48,006 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:48:48,008 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:48:48,010 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:48:48,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46222a26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:48:48,013 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:48:48,021 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,42137,1733636925929, seqNum=-1] 2024-12-08T05:48:48,021 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:48:48,024 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:48:48,051 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=0d942cb2025d,39457,1733636925244 2024-12-08T05:48:48,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:48:48,062 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:48:48,067 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:48:48,073 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,39457,1733636925244 2024-12-08T05:48:48,077 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5fd5bef9 2024-12-08T05:48:48,078 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:48:48,081 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:48:48,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T05:48:48,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
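Here the test switches the balancer off and the TableDescriptorChecker warns that MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) are unusually small; the test sets them that low on purpose so splits and flushes happen quickly. A hedged reconstruction of the same actions through the public Admin and TableDescriptorBuilder APIs (this is a sketch, not the test's actual code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallLimitsTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Turn the balancer off for the duration of the test, as logged above.
          admin.balancerSwitch(false, true);
          TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build());
        }
      }
    }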
2024-12-08T05:48:48,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:48:48,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-08T05:48:48,101 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:48:48,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-08T05:48:48,104 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:48,107 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:48:48,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:48:48,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741835_1011 (size=389) 2024-12-08T05:48:48,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741835_1011 (size=389) 2024-12-08T05:48:48,564 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ea539f352c97cdd37f9496939ff0b706, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976 2024-12-08T05:48:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741836_1012 (size=72) 2024-12-08T05:48:48,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741836_1012 (size=72) 2024-12-08T05:48:48,581 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:48,582 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ea539f352c97cdd37f9496939ff0b706, disabling compactions & flushes 2024-12-08T05:48:48,582 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,582 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,582 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. after waiting 0 ms 2024-12-08T05:48:48,582 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,582 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,582 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ea539f352c97cdd37f9496939ff0b706: Waiting for close lock at 1733636928582Disabling compacts and flushes for region at 1733636928582Disabling writes for close at 1733636928582Writing region close event to WAL at 1733636928582Closed at 1733636928582 2024-12-08T05:48:48,585 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:48:48,590 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733636928585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733636928585"}]},"ts":"1733636928585"} 2024-12-08T05:48:48,595 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T05:48:48,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:48:48,600 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733636928598"}]},"ts":"1733636928598"} 2024-12-08T05:48:48,605 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-08T05:48:48,607 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea539f352c97cdd37f9496939ff0b706, ASSIGN}] 2024-12-08T05:48:48,609 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea539f352c97cdd37f9496939ff0b706, ASSIGN 2024-12-08T05:48:48,611 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea539f352c97cdd37f9496939ff0b706, ASSIGN; state=OFFLINE, location=0d942cb2025d,42137,1733636925929; forceNewPlan=false, retain=false 2024-12-08T05:48:48,762 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea539f352c97cdd37f9496939ff0b706, regionState=OPENING, regionLocation=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:48,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea539f352c97cdd37f9496939ff0b706, ASSIGN because future has completed 2024-12-08T05:48:48,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea539f352c97cdd37f9496939ff0b706, server=0d942cb2025d,42137,1733636925929}] 2024-12-08T05:48:48,929 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 
2024-12-08T05:48:48,930 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ea539f352c97cdd37f9496939ff0b706, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:48:48,930 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,930 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:48:48,931 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,931 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,934 INFO [StoreOpener-ea539f352c97cdd37f9496939ff0b706-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,936 INFO [StoreOpener-ea539f352c97cdd37f9496939ff0b706-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea539f352c97cdd37f9496939ff0b706 columnFamilyName info 2024-12-08T05:48:48,936 DEBUG [StoreOpener-ea539f352c97cdd37f9496939ff0b706-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:48:48,937 INFO [StoreOpener-ea539f352c97cdd37f9496939ff0b706-1 {}] regionserver.HStore(327): Store=ea539f352c97cdd37f9496939ff0b706/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:48:48,938 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,939 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,940 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,940 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,940 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,943 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,946 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:48:48,947 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ea539f352c97cdd37f9496939ff0b706; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816750, jitterRate=0.03855147957801819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:48:48,947 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:48:48,948 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ea539f352c97cdd37f9496939ff0b706: Running coprocessor pre-open hook at 1733636928931Writing region info on filesystem at 1733636928931Initializing all the Stores at 1733636928933 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733636928933Cleaning up temporary data from old regions at 1733636928940 (+7 ms)Running coprocessor post-open hooks at 1733636928947 (+7 ms)Region opened successfully at 1733636928948 (+1 ms) 2024-12-08T05:48:48,950 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706., pid=6, masterSystemTime=1733636928922 2024-12-08T05:48:48,954 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,954 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:48:48,955 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea539f352c97cdd37f9496939ff0b706, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,42137,1733636925929 2024-12-08T05:48:48,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea539f352c97cdd37f9496939ff0b706, server=0d942cb2025d,42137,1733636925929 because future has completed 2024-12-08T05:48:48,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:48:48,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ea539f352c97cdd37f9496939ff0b706, server=0d942cb2025d,42137,1733636925929 in 194 msec 2024-12-08T05:48:48,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:48:48,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ea539f352c97cdd37f9496939ff0b706, ASSIGN in 360 msec 2024-12-08T05:48:48,974 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:48:48,974 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733636928974"}]},"ts":"1733636928974"} 2024-12-08T05:48:48,978 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-08T05:48:48,988 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:48:48,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 897 msec 2024-12-08T05:48:53,189 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T05:48:53,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:48:53,236 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-08T05:48:55,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:48:55,649 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T05:48:55,651 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-08T05:48:55,651 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T05:48:55,652 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:48:55,652 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T05:48:55,653 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T05:48:55,653 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T05:48:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39457 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:48:58,169 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-08T05:48:58,171 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-08T05:48:58,178 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-08T05:48:58,179 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 
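Once the CREATE procedure (pid=4) completes, the test scans hbase:meta and finds exactly one region for the table. The same check from a client, assuming the standard Admin.getRegions API; a sketch for illustration only.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class CountRegionsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          // One region is expected here, matching "Found 1 regions for table" above.
          for (RegionInfo ri : admin.getRegions(tn)) {
            System.out.println(ri.getRegionNameAsString());
          }
        }
      }
    }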
2024-12-08T05:48:58,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636938179 2024-12-08T05:48:58,188 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:48:58,188 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:48:58,188 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:48:58,188 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:48:58,189 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:48:58,189 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636927230 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636938179 2024-12-08T05:48:58,190 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42719:42719),(127.0.0.1/127.0.0.1:37471:37471)] 2024-12-08T05:48:58,190 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636927230 is not closed yet, will try archiving it next time 2024-12-08T05:48:58,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741833_1009 (size=451) 2024-12-08T05:48:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741833_1009 (size=451) 2024-12-08T05:48:58,194 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636927230 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636927230 2024-12-08T05:48:58,199 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706., hostname=0d942cb2025d,42137,1733636925929, seqNum=2] 2024-12-08T05:49:10,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42137 {}] regionserver.HRegion(8855): Flush requested on ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:49:10,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea539f352c97cdd37f9496939ff0b706 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:49:10,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/da091eb26b01448d97943cf64cf8e44b is 1080, key is row0001/info:/1733636938201/Put/seqid=0 2024-12-08T05:49:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741838_1014 (size=12509) 2024-12-08T05:49:10,306 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741838_1014 (size=12509) 2024-12-08T05:49:10,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/da091eb26b01448d97943cf64cf8e44b 2024-12-08T05:49:10,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/da091eb26b01448d97943cf64cf8e44b as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b 2024-12-08T05:49:10,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b, entries=7, sequenceid=11, filesize=12.2 K 2024-12-08T05:49:10,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 139ms, sequenceid=11, compaction requested=false 2024-12-08T05:49:10,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea539f352c97cdd37f9496939ff0b706: 2024-12-08T05:49:14,348 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
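The flush cycle above reports its result in a fixed wording (data size in bytes, duration, sequence id). Below is a minimal Java sketch for pulling those numbers out of such a "Finished flush" line, assuming only the phrasing visible in this log; the regex and class name are illustrative and are not part of HBase.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Minimal parser keyed to the "Finished flush" wording seen in this log.
public class FlushLineParser {
    private static final Pattern FLUSH = Pattern.compile(
        "Finished flush of dataSize ~[^/]+/(\\d+), heapSize ~[^/]+/(\\d+).* for (\\S+) in (\\d+)ms, sequenceid=(\\d+)");

    public static void main(String[] args) {
        String line = "Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, "
            + "currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 139ms, "
            + "sequenceid=11, compaction requested=false";
        Matcher m = FLUSH.matcher(line);
        if (m.find()) {
            // Prints: region, data bytes, heap bytes, flush duration, sequence id.
            System.out.printf("region=%s dataBytes=%s heapBytes=%s tookMs=%s seqId=%s%n",
                m.group(3), m.group(1), m.group(2), m.group(4), m.group(5));
        }
    }
}
```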
2024-12-08T05:49:18,245 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636958244 2024-12-08T05:49:18,455 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:18,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:18,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:18,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:18,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:18,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:18,457 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636938179 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636958244 2024-12-08T05:49:18,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741837_1013 (size=12399) 2024-12-08T05:49:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741837_1013 (size=12399) 2024-12-08T05:49:18,465 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:49:18,668 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:20,872 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:23,076 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:25,280 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:25,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42137 {}] 
regionserver.HRegion(8855): Flush requested on ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:49:25,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea539f352c97cdd37f9496939ff0b706 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:49:25,482 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:25,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/dba8c328f93443faae50612eced2897e is 1080, key is row0008/info:/1733636952232/Put/seqid=0 2024-12-08T05:49:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741840_1016 (size=12509) 2024-12-08T05:49:25,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741840_1016 (size=12509) 2024-12-08T05:49:25,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/dba8c328f93443faae50612eced2897e 2024-12-08T05:49:25,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/dba8c328f93443faae50612eced2897e as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e 2024-12-08T05:49:25,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e, entries=7, sequenceid=21, filesize=12.2 K 2024-12-08T05:49:25,720 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:25,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 440ms, sequenceid=21, compaction requested=false 2024-12-08T05:49:25,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea539f352c97cdd37f9496939ff0b706: 2024-12-08T05:49:25,721 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-08T05:49:25,721 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:49:25,723 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b because midkey is the same as first or last row 2024-12-08T05:49:27,485 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:27,964 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T05:49:27,964 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-08T05:49:29,690 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:29,692 WARN [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:29,693 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C42137%2C1733636925929:(num 1733636958244) roll requested 2024-12-08T05:49:29,693 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636969693 2024-12-08T05:49:29,901 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:49:29,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:29,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:29,902 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:29,902 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:29,902 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:29,902 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636958244 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636969693 2024-12-08T05:49:29,903 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42719:42719),(127.0.0.1/127.0.0.1:37471:37471)] 2024-12-08T05:49:29,903 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636958244 is not closed yet, will try archiving it next time 2024-12-08T05:49:29,904 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636938179 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636938179 2024-12-08T05:49:29,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741839_1015 (size=7739) 2024-12-08T05:49:29,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741839_1015 (size=7739) 2024-12-08T05:49:31,894 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:33,931 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ea539f352c97cdd37f9496939ff0b706, had cached 0 bytes from a total of 25018 2024-12-08T05:49:34,099 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:36,303 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:38,507 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:40,509 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:49:40,509 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636980509 
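The WAL names being rolled and archived here follow a visible pattern: the region server name (host,port,startcode) with its commas percent-encoded, then "." and the new file's creation timestamp. The small sketch below reproduces one of the names above under that assumption; it is an observation from this log, not a documented HBase naming API.

```java
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

// Rebuilds the WAL file name 0d942cb2025d%2C42137%2C1733636925929.1733636969693
// from the server name and roll timestamp seen in the surrounding log entries.
public class WalNameSketch {
    public static void main(String[] args) {
        String serverName = "0d942cb2025d,42137,1733636925929"; // host,port,startcode
        long creationTs = 1733636969693L;                        // timestamp of this roll
        String walName = URLEncoder.encode(serverName, StandardCharsets.UTF_8) + "." + creationTs;
        System.out.println(walName);
    }
}
```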
2024-12-08T05:49:44,348 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T05:49:45,519 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:45,521 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:45,521 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C42137%2C1733636925929:(num 1733636980509) roll requested 2024-12-08T05:49:45,521 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:45,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:45,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:45,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:45,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:45,522 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636969693 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636980509 2024-12-08T05:49:45,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741841_1017 (size=4753) 2024-12-08T05:49:45,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741841_1017 (size=4753) 2024-12-08T05:49:45,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42719:42719),(127.0.0.1/127.0.0.1:37471:37471)] 2024-12-08T05:49:45,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636969693 is not closed yet, will try archiving it next time 2024-12-08T05:49:45,531 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636985530 2024-12-08T05:49:50,534 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:50,534 WARN [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:50,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42137 {}] regionserver.HRegion(8855): Flush requested on ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:49:50,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea539f352c97cdd37f9496939ff0b706 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:49:50,539 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:50,539 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:52,535 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:49:55,537 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:55,537 WARN [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK], DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK]] 2024-12-08T05:49:55,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:55,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:55,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:55,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:55,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:49:55,538 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636980509 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636985530 2024-12-08T05:49:55,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741842_1018 (size=1569) 2024-12-08T05:49:55,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741842_1018 (size=1569) 
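Two distinct roll triggers show up in this stretch of the log: one when the number of slow syncs exceeds a count threshold (count=8, threshold=5) and one when a single sync exceeds a time threshold (time=5006 ms, threshold=5000 ms). The sketch below models that decision in isolation; the thresholds and the structure are assumptions read off the log messages, not the actual AbstractFSWAL/FSHLog code.

```java
// Illustrative roll policy: request a roll when one sync is slower than a hard
// time threshold, or when too many "slow" syncs accumulate.
public class SlowSyncRollPolicy {
    private final long slowSyncMs;          // a sync above this counts as "slow"
    private final long rollOnSingleSyncMs;  // one sync above this requests a roll immediately
    private final int slowSyncCountThreshold;
    private int slowSyncCount;

    public SlowSyncRollPolicy(long slowSyncMs, long rollOnSingleSyncMs, int countThreshold) {
        this.slowSyncMs = slowSyncMs;
        this.rollOnSingleSyncMs = rollOnSingleSyncMs;
        this.slowSyncCountThreshold = countThreshold;
    }

    /** Returns true if a log roll should be requested after a sync that took syncMs. */
    public boolean onSync(long syncMs) {
        if (syncMs >= rollOnSingleSyncMs) {
            return true;                     // cf. "time=5006 ms, threshold=5000 ms"
        }
        if (syncMs >= slowSyncMs && ++slowSyncCount > slowSyncCountThreshold) {
            slowSyncCount = 0;
            return true;                     // cf. "count=8, threshold=5"
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncRollPolicy policy = new SlowSyncRollPolicy(100, 5000, 5);
        long[] observed = {207, 201, 201, 201, 201, 201, 201, 201}; // sync costs from this log
        for (long ms : observed) {
            if (policy.onSync(ms)) {
                System.out.println("roll requested after sync of " + ms + " ms");
            }
        }
    }
}
```

Feeding the eight ~200 ms sync costs from this log into the sketch requests a roll on the sixth slow sync, mirroring the count-based warning above.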
2024-12-08T05:49:55,544 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:49:55,545 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636980509 is not closed yet, will try archiving it next time 2024-12-08T05:49:55,545 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C42137%2C1733636925929:(num 1733636985530) roll requested 2024-12-08T05:49:55,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/69e835c8b9b2430fb15f7bc17c9e19a7 is 1080, key is row0015/info:/1733636967283/Put/seqid=0 2024-12-08T05:49:55,545 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733636995545 2024-12-08T05:49:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741844_1020 (size=12509) 2024-12-08T05:49:55,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741844_1020 (size=12509) 2024-12-08T05:49:55,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/69e835c8b9b2430fb15f7bc17c9e19a7 2024-12-08T05:49:55,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/69e835c8b9b2430fb15f7bc17c9e19a7 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7 2024-12-08T05:49:55,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7, entries=7, sequenceid=31, filesize=12.2 K 2024-12-08T05:50:00,573 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5024 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:50:00,573 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5024 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:50:00,582 INFO [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:50:00,582 WARN [FSHLog-0-hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976-prefix:0d942cb2025d,42137,1733636925929 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36757,DS-cf3b8273-3ba7-4eff-9a12-93a646be6713,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-0ab98241-3aa4-4877-b9bd-6456d6f05af2,DISK]] 2024-12-08T05:50:00,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 10048ms, sequenceid=31, compaction requested=true 2024-12-08T05:50:00,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea539f352c97cdd37f9496939ff0b706: 2024-12-08T05:50:00,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,583 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-08T05:50:00,583 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:00,583 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,583 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b because midkey is the same as first or last row 2024-12-08T05:50:00,583 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,583 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636985530 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636995545 2024-12-08T05:50:00,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741843_1019 (size=438) 2024-12-08T05:50:00,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741843_1019 (size=438) 2024-12-08T05:50:00,587 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636958244 to 
hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636958244 2024-12-08T05:50:00,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ea539f352c97cdd37f9496939ff0b706:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:50:00,589 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636969693 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636969693 2024-12-08T05:50:00,591 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636980509 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636980509 2024-12-08T05:50:00,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:50:00,592 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:50:00,592 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42719:42719),(127.0.0.1/127.0.0.1:37471:37471)] 2024-12-08T05:50:00,592 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C42137%2C1733636925929:(num 1733637000592) roll requested 2024-12-08T05:50:00,593 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733637000592 2024-12-08T05:50:00,594 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636985530 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636985530 2024-12-08T05:50:00,596 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:50:00,598 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HStore(1541): ea539f352c97cdd37f9496939ff0b706/info is initiating minor compaction (all files) 2024-12-08T05:50:00,598 INFO [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ea539f352c97cdd37f9496939ff0b706/info in TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 
2024-12-08T05:50:00,598 INFO [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7] into tmpdir=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp, totalSize=36.6 K 2024-12-08T05:50:00,600 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] compactions.Compactor(225): Compacting da091eb26b01448d97943cf64cf8e44b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733636938201 2024-12-08T05:50:00,600 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] compactions.Compactor(225): Compacting dba8c328f93443faae50612eced2897e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733636952232 2024-12-08T05:50:00,601 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] compactions.Compactor(225): Compacting 69e835c8b9b2430fb15f7bc17c9e19a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733636967283 2024-12-08T05:50:00,624 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,626 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,627 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636995545 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733637000592 2024-12-08T05:50:00,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741845_1021 (size=93) 2024-12-08T05:50:00,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741845_1021 (size=93) 2024-12-08T05:50:00,632 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733636995545 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs/0d942cb2025d%2C42137%2C1733636925929.1733636995545 2024-12-08T05:50:00,645 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37471:37471),(127.0.0.1/127.0.0.1:42719:42719)] 2024-12-08T05:50:00,646 INFO 
[regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C42137%2C1733636925929.1733637000646 2024-12-08T05:50:00,666 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,666 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,666 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:00,667 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733637000592 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/WALs/0d942cb2025d,42137,1733636925929/0d942cb2025d%2C42137%2C1733636925929.1733637000646 2024-12-08T05:50:00,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741846_1022 (size=1258) 2024-12-08T05:50:00,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741846_1022 (size=1258) 2024-12-08T05:50:00,674 INFO [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ea539f352c97cdd37f9496939ff0b706#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:50:00,675 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/622426718be240249090f5cf1c09ff27 is 1080, key is row0001/info:/1733636938201/Put/seqid=0 2024-12-08T05:50:00,677 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42719:42719),(127.0.0.1/127.0.0.1:37471:37471)] 2024-12-08T05:50:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741848_1024 (size=27710) 2024-12-08T05:50:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741848_1024 (size=27710) 2024-12-08T05:50:00,700 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/622426718be240249090f5cf1c09ff27 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/622426718be240249090f5cf1c09ff27 2024-12-08T05:50:00,722 INFO [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ea539f352c97cdd37f9496939ff0b706/info of ea539f352c97cdd37f9496939ff0b706 into 622426718be240249090f5cf1c09ff27(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
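The minor compaction that just completed merged three 12,509-byte HFiles (37,527 bytes total) that the exploring policy reported as being "in ratio". As a rough illustration of that ratio test, the sketch below checks that no file in the candidate window is larger than ratio times the sum of the others; the 1.2 ratio and the simplification are assumptions, not the policy's full selection logic.

```java
import java.util.List;

// Simplified "in ratio" check over a candidate window of store file sizes.
public class InRatioCheck {
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        List<Long> window = List.of(12509L, 12509L, 12509L); // the three HFiles from this log
        System.out.println("in ratio: " + inRatio(window, 1.2));
        System.out.println("total size: " + window.stream().mapToLong(Long::longValue).sum()); // 37527
    }
}
```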
2024-12-08T05:50:00,722 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ea539f352c97cdd37f9496939ff0b706: 2024-12-08T05:50:00,725 INFO [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706., storeName=ea539f352c97cdd37f9496939ff0b706/info, priority=13, startTime=1733637000585; duration=0sec 2024-12-08T05:50:00,726 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T05:50:00,726 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:00,726 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/622426718be240249090f5cf1c09ff27 because midkey is the same as first or last row 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/622426718be240249090f5cf1c09ff27 because midkey is the same as first or last row 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/622426718be240249090f5cf1c09ff27 because midkey is the same as first or last row 2024-12-08T05:50:00,727 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:50:00,728 DEBUG [RS:0;0d942cb2025d:42137-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ea539f352c97cdd37f9496939ff0b706:info 2024-12-08T05:50:12,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42137 {}] regionserver.HRegion(8855): Flush requested on ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:50:12,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ea539f352c97cdd37f9496939ff0b706 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:50:12,680 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/433e35af018749b0b5aa9629b6f8a227 is 1080, key is row0022/info:/1733637000648/Put/seqid=0 2024-12-08T05:50:12,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741849_1025 (size=12509) 2024-12-08T05:50:12,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741849_1025 (size=12509) 2024-12-08T05:50:12,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/433e35af018749b0b5aa9629b6f8a227 2024-12-08T05:50:12,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/433e35af018749b0b5aa9629b6f8a227 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/433e35af018749b0b5aa9629b6f8a227 2024-12-08T05:50:12,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/433e35af018749b0b5aa9629b6f8a227, entries=7, sequenceid=42, filesize=12.2 K 2024-12-08T05:50:12,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 33ms, sequenceid=42, compaction requested=false 2024-12-08T05:50:12,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ea539f352c97cdd37f9496939ff0b706: 2024-12-08T05:50:12,706 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-08T05:50:12,706 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:12,706 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/622426718be240249090f5cf1c09ff27 because midkey is the same as first or last row 2024-12-08T05:50:14,348 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
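The flusher keeps concluding "should split" (summed store size above sizeToCheck=16.0 K) but then "cannot split" because the chosen midkey equals the file's first or last row. Below is a simplified sketch of that two-part decision, assuming sizeToCheck here comes from twice an 8 K flush size with a single region of the table; the formula, sizes, and key values are inferences from this log, not the HBase split-policy classes.

```java
// Sketch of the split decision: size check first, then a sanity check on the split point.
public class SplitDecisionSketch {
    static long sizeToCheck(long flushSizeBytes, long maxFileSizeBytes, int regionsWithCommonTable) {
        long n = regionsWithCommonTable;
        return Math.min(maxFileSizeBytes, 2 * flushSizeBytes * n * n * n);
    }

    static boolean shouldSplit(long sumStoreSize, long sizeToCheck) {
        return sumStoreSize > sizeToCheck;
    }

    static boolean canSplitAt(String midkey, String firstKey, String lastKey) {
        // A split point equal to the first or last row would produce an empty daughter region.
        return !midkey.equals(firstKey) && !midkey.equals(lastKey);
    }

    public static void main(String[] args) {
        long sizeToCheck = sizeToCheck(8 * 1024, 10L * 1024 * 1024 * 1024, 1); // 16 K
        long sumStoreSize = 40219;            // ~39.3 K, the region's total store size here
        System.out.println("should split: " + shouldSplit(sumStoreSize, sizeToCheck));
        // All writes in this test land in a narrow row range, so the midkey collapses onto an edge key.
        System.out.println("can split at midkey: " + canSplitAt("row0001", "row0001", "row0029"));
    }
}
```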
2024-12-08T05:50:18,931 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ea539f352c97cdd37f9496939ff0b706, had cached 0 bytes from a total of 40219 2024-12-08T05:50:20,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:50:20,684 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:50:20,684 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-12-08T05:50:20,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:20,690 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:20,690 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:50:20,690 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:50:20,690 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=488572464, stopped=false 2024-12-08T05:50:20,690 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,39457,1733636925244 2024-12-08T05:50:20,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:20,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:20,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:20,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:20,692 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:50:20,693 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:50:20,693 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:50:20,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:20,693 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:20,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:20,693 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,42137,1733636925929' ***** 2024-12-08T05:50:20,693 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:50:20,694 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:50:20,694 INFO [RS:0;0d942cb2025d:42137 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:50:20,694 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:50:20,694 INFO [RS:0;0d942cb2025d:42137 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:50:20,695 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(3091): Received CLOSE for ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:50:20,695 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,42137,1733636925929 2024-12-08T05:50:20,695 INFO [RS:0;0d942cb2025d:42137 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:50:20,695 INFO [RS:0;0d942cb2025d:42137 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:42137. 2024-12-08T05:50:20,695 DEBUG [RS:0;0d942cb2025d:42137 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:50:20,695 DEBUG [RS:0;0d942cb2025d:42137 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:20,695 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-08T05:50:20,695 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ea539f352c97cdd37f9496939ff0b706, disabling compactions & flushes 2024-12-08T05:50:20,696 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:50:20,696 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:50:20,696 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:50:20,696 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. after waiting 0 ms 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:50:20,696 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ea539f352c97cdd37f9496939ff0b706 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-08T05:50:20,696 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:50:20,696 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1325): Online Regions={ea539f352c97cdd37f9496939ff0b706=TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:50:20,696 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:50:20,696 DEBUG [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea539f352c97cdd37f9496939ff0b706 2024-12-08T05:50:20,696 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:50:20,697 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 
2024-12-08T05:50:20,702 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/f4387ce8f76a4c9a9587a96bf90f94a7 is 1080, key is row0029/info:/1733637014675/Put/seqid=0 2024-12-08T05:50:20,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741850_1026 (size=8193) 2024-12-08T05:50:20,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741850_1026 (size=8193) 2024-12-08T05:50:20,715 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/f4387ce8f76a4c9a9587a96bf90f94a7 2024-12-08T05:50:20,719 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/info/1a5e55e7a96e43dcb839d24ec780f89b is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706./info:regioninfo/1733636928955/Put/seqid=0 2024-12-08T05:50:20,725 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/.tmp/info/f4387ce8f76a4c9a9587a96bf90f94a7 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/f4387ce8f76a4c9a9587a96bf90f94a7 2024-12-08T05:50:20,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741851_1027 (size=7016) 2024-12-08T05:50:20,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741851_1027 (size=7016) 2024-12-08T05:50:20,729 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/info/1a5e55e7a96e43dcb839d24ec780f89b 2024-12-08T05:50:20,734 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/f4387ce8f76a4c9a9587a96bf90f94a7, entries=3, sequenceid=48, filesize=8.0 K 2024-12-08T05:50:20,736 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, 
heapSize ~3.61 KB/3696, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 40ms, sequenceid=48, compaction requested=true 2024-12-08T05:50:20,736 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7] to archive 2024-12-08T05:50:20,739 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T05:50:20,743 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/da091eb26b01448d97943cf64cf8e44b 2024-12-08T05:50:20,745 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/dba8c328f93443faae50612eced2897e 2024-12-08T05:50:20,747 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7 to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/info/69e835c8b9b2430fb15f7bc17c9e19a7 2024-12-08T05:50:20,759 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/ns/fb3cacee942b432e8aaf6c357fffaf68 is 43, key is default/ns:d/1733636927828/Put/seqid=0 2024-12-08T05:50:20,762 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to 
Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0d942cb2025d:39457 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-08T05:50:20,766 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [da091eb26b01448d97943cf64cf8e44b=12509, dba8c328f93443faae50612eced2897e=12509, 69e835c8b9b2430fb15f7bc17c9e19a7=12509] 2024-12-08T05:50:20,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741852_1028 (size=5153) 2024-12-08T05:50:20,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741852_1028 (size=5153) 2024-12-08T05:50:20,769 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/ns/fb3cacee942b432e8aaf6c357fffaf68 2024-12-08T05:50:20,772 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/default/TestLogRolling-testSlowSyncLogRolling/ea539f352c97cdd37f9496939ff0b706/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-08T05:50:20,775 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:50:20,775 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ea539f352c97cdd37f9496939ff0b706: Waiting for close lock at 1733637020695Running coprocessor pre-close hooks at 1733637020695Disabling compacts and flushes for region at 1733637020695Disabling writes for close at 1733637020696 (+1 ms)Obtaining lock to block concurrent updates at 1733637020696Preparing flush snapshotting stores in ea539f352c97cdd37f9496939ff0b706 at 1733637020696Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733637020696Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 
at 1733637020698 (+2 ms)Flushing ea539f352c97cdd37f9496939ff0b706/info: creating writer at 1733637020698Flushing ea539f352c97cdd37f9496939ff0b706/info: appending metadata at 1733637020702 (+4 ms)Flushing ea539f352c97cdd37f9496939ff0b706/info: closing flushed file at 1733637020702Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38cd83cb: reopening flushed file at 1733637020724 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ea539f352c97cdd37f9496939ff0b706 in 40ms, sequenceid=48, compaction requested=true at 1733637020736 (+12 ms)Writing region close event to WAL at 1733637020767 (+31 ms)Running coprocessor post-close hooks at 1733637020773 (+6 ms)Closed at 1733637020775 (+2 ms) 2024-12-08T05:50:20,776 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733636928083.ea539f352c97cdd37f9496939ff0b706. 2024-12-08T05:50:20,795 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/table/9a707cdc891e40d798ab48fbfb67b701 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733636928974/Put/seqid=0 2024-12-08T05:50:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741853_1029 (size=5396) 2024-12-08T05:50:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741853_1029 (size=5396) 2024-12-08T05:50:20,802 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/table/9a707cdc891e40d798ab48fbfb67b701 2024-12-08T05:50:20,811 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/info/1a5e55e7a96e43dcb839d24ec780f89b as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/info/1a5e55e7a96e43dcb839d24ec780f89b 2024-12-08T05:50:20,819 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/info/1a5e55e7a96e43dcb839d24ec780f89b, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T05:50:20,821 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/ns/fb3cacee942b432e8aaf6c357fffaf68 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/ns/fb3cacee942b432e8aaf6c357fffaf68 2024-12-08T05:50:20,828 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/ns/fb3cacee942b432e8aaf6c357fffaf68, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:50:20,829 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/.tmp/table/9a707cdc891e40d798ab48fbfb67b701 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/table/9a707cdc891e40d798ab48fbfb67b701 2024-12-08T05:50:20,836 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/table/9a707cdc891e40d798ab48fbfb67b701, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T05:50:20,838 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-12-08T05:50:20,845 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T05:50:20,846 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:50:20,846 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:20,846 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637020696Running coprocessor pre-close hooks at 1733637020696Disabling compacts and flushes for region at 1733637020696Disabling writes for close at 1733637020696Obtaining lock to block concurrent updates at 1733637020697 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733637020697Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733637020697Flushing stores of hbase:meta,,1.1588230740 at 1733637020698 (+1 ms)Flushing 1588230740/info: creating writer at 1733637020698Flushing 1588230740/info: appending metadata at 1733637020718 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733637020718Flushing 1588230740/ns: creating writer at 1733637020738 (+20 ms)Flushing 1588230740/ns: appending metadata at 1733637020758 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733637020759 (+1 ms)Flushing 1588230740/table: creating writer at 1733637020779 (+20 ms)Flushing 1588230740/table: appending metadata at 1733637020794 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733637020794Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@701363cd: reopening flushed file at 1733637020810 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@364d205a: reopening flushed file at 1733637020820 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c9f3bbb: reopening flushed file at 1733637020828 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1733637020838 (+10 ms)Writing region close event to WAL at 1733637020840 (+2 ms)Running coprocessor post-close hooks at 1733637020846 (+6 ms)Closed at 1733637020846 2024-12-08T05:50:20,846 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:20,897 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,42137,1733636925929; all regions closed. 2024-12-08T05:50:20,899 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,899 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,899 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741834_1010 (size=3066) 2024-12-08T05:50:20,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741834_1010 (size=3066) 2024-12-08T05:50:20,906 DEBUG [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs 2024-12-08T05:50:20,907 INFO [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C42137%2C1733636925929.meta:.meta(num 1733636927660) 2024-12-08T05:50:20,907 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,907 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,907 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,907 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,908 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:20,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741847_1023 (size=12695) 2024-12-08T05:50:20,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741847_1023 (size=12695) 2024-12-08T05:50:20,914 DEBUG [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/oldWALs 2024-12-08T05:50:20,914 INFO [RS:0;0d942cb2025d:42137 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C42137%2C1733636925929:(num 1733637000646) 2024-12-08T05:50:20,914 DEBUG [RS:0;0d942cb2025d:42137 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:20,914 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:50:20,914 INFO [RS:0;0d942cb2025d:42137 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:50:20,915 INFO [RS:0;0d942cb2025d:42137 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:50:20,915 INFO [RS:0;0d942cb2025d:42137 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:50:20,915 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:50:20,915 INFO [RS:0;0d942cb2025d:42137 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42137 2024-12-08T05:50:20,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,42137,1733636925929 2024-12-08T05:50:20,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:50:20,920 INFO [RS:0;0d942cb2025d:42137 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:50:20,921 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,42137,1733636925929] 2024-12-08T05:50:20,925 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,42137,1733636925929 already deleted, retry=false 2024-12-08T05:50:20,925 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,42137,1733636925929 expired; onlineServers=0 2024-12-08T05:50:20,925 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,39457,1733636925244' ***** 2024-12-08T05:50:20,925 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:50:20,925 INFO [M:0;0d942cb2025d:39457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:50:20,925 INFO [M:0;0d942cb2025d:39457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:50:20,926 DEBUG [M:0;0d942cb2025d:39457 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:50:20,926 DEBUG [M:0;0d942cb2025d:39457 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:50:20,926 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:50:20,926 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733636926928 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733636926928,5,FailOnTimeoutGroup] 2024-12-08T05:50:20,926 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733636926929 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733636926929,5,FailOnTimeoutGroup] 2024-12-08T05:50:20,926 INFO [M:0;0d942cb2025d:39457 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:50:20,926 INFO [M:0;0d942cb2025d:39457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:50:20,926 DEBUG [M:0;0d942cb2025d:39457 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:50:20,926 INFO [M:0;0d942cb2025d:39457 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:50:20,926 INFO [M:0;0d942cb2025d:39457 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:50:20,927 INFO [M:0;0d942cb2025d:39457 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:50:20,927 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:50:20,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:50:20,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:20,928 DEBUG [M:0;0d942cb2025d:39457 {}] zookeeper.ZKUtil(347): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:50:20,928 WARN [M:0;0d942cb2025d:39457 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:50:20,929 INFO [M:0;0d942cb2025d:39457 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/.lastflushedseqids 2024-12-08T05:50:20,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741854_1030 (size=130) 2024-12-08T05:50:20,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741854_1030 (size=130) 2024-12-08T05:50:20,942 INFO [M:0;0d942cb2025d:39457 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:50:20,942 INFO [M:0;0d942cb2025d:39457 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:50:20,942 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:50:20,942 INFO [M:0;0d942cb2025d:39457 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:20,942 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:20,942 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:50:20,942 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:20,943 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-08T05:50:20,962 DEBUG [M:0;0d942cb2025d:39457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9d0a55def36a4fbdbbb6ceb8155035f4 is 82, key is hbase:meta,,1/info:regioninfo/1733636927743/Put/seqid=0 2024-12-08T05:50:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741855_1031 (size=5672) 2024-12-08T05:50:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741855_1031 (size=5672) 2024-12-08T05:50:20,970 INFO [M:0;0d942cb2025d:39457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9d0a55def36a4fbdbbb6ceb8155035f4 2024-12-08T05:50:20,995 DEBUG [M:0;0d942cb2025d:39457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1924e16f8b347098903fbf6ed21cc10 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733636928991/Put/seqid=0 2024-12-08T05:50:21,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741856_1032 (size=6247) 2024-12-08T05:50:21,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741856_1032 (size=6247) 2024-12-08T05:50:21,003 INFO [M:0;0d942cb2025d:39457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1924e16f8b347098903fbf6ed21cc10 2024-12-08T05:50:21,009 INFO [M:0;0d942cb2025d:39457 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f1924e16f8b347098903fbf6ed21cc10 2024-12-08T05:50:21,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:21,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42137-0x101909e0e880001, quorum=127.0.0.1:49643, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:21,023 INFO [RS:0;0d942cb2025d:42137 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:50:21,023 INFO [RS:0;0d942cb2025d:42137 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,42137,1733636925929; zookeeper connection closed. 2024-12-08T05:50:21,024 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@25115084 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@25115084 2024-12-08T05:50:21,024 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:50:21,027 DEBUG [M:0;0d942cb2025d:39457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1beb3eaca1ea45c5a42f2fdf68d4ca37 is 69, key is 0d942cb2025d,42137,1733636925929/rs:state/1733636926968/Put/seqid=0 2024-12-08T05:50:21,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741857_1033 (size=5156) 2024-12-08T05:50:21,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741857_1033 (size=5156) 2024-12-08T05:50:21,033 INFO [M:0;0d942cb2025d:39457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1beb3eaca1ea45c5a42f2fdf68d4ca37 2024-12-08T05:50:21,058 DEBUG [M:0;0d942cb2025d:39457 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fd5401d01e0b43748b98c131bc2c317c is 52, key is load_balancer_on/state:d/1733636928057/Put/seqid=0 2024-12-08T05:50:21,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741858_1034 (size=5056) 2024-12-08T05:50:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741858_1034 (size=5056) 2024-12-08T05:50:21,064 INFO [M:0;0d942cb2025d:39457 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fd5401d01e0b43748b98c131bc2c317c 2024-12-08T05:50:21,066 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:50:21,073 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9d0a55def36a4fbdbbb6ceb8155035f4 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9d0a55def36a4fbdbbb6ceb8155035f4 2024-12-08T05:50:21,079 INFO [M:0;0d942cb2025d:39457 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9d0a55def36a4fbdbbb6ceb8155035f4, entries=8, sequenceid=59, filesize=5.5 K 2024-12-08T05:50:21,081 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1924e16f8b347098903fbf6ed21cc10 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1924e16f8b347098903fbf6ed21cc10 2024-12-08T05:50:21,087 INFO [M:0;0d942cb2025d:39457 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f1924e16f8b347098903fbf6ed21cc10 2024-12-08T05:50:21,087 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1924e16f8b347098903fbf6ed21cc10, entries=6, sequenceid=59, filesize=6.1 K 2024-12-08T05:50:21,089 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1beb3eaca1ea45c5a42f2fdf68d4ca37 as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1beb3eaca1ea45c5a42f2fdf68d4ca37 2024-12-08T05:50:21,095 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1beb3eaca1ea45c5a42f2fdf68d4ca37, entries=1, sequenceid=59, filesize=5.0 K 2024-12-08T05:50:21,096 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fd5401d01e0b43748b98c131bc2c317c as hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fd5401d01e0b43748b98c131bc2c317c 2024-12-08T05:50:21,103 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fd5401d01e0b43748b98c131bc2c317c, entries=1, sequenceid=59, filesize=4.9 K 2024-12-08T05:50:21,104 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=59, compaction requested=false 2024-12-08T05:50:21,106 INFO [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:50:21,106 DEBUG [M:0;0d942cb2025d:39457 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637020942Disabling compacts and flushes for region at 1733637020942Disabling writes for close at 1733637020942Obtaining lock to block concurrent updates at 1733637020943 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637020943Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733637020943Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733637020944 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637020944Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637020962 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637020962Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637020977 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637020995 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637020995Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637021010 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637021026 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637021026Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637021041 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637021057 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637021057Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ab2b074: reopening flushed file at 1733637021072 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6241971a: reopening flushed file at 1733637021080 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c9130b: reopening flushed file at 1733637021088 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28bf440b: reopening flushed file at 1733637021095 (+7 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 162ms, sequenceid=59, compaction requested=false at 1733637021104 (+9 ms)Writing region close event to WAL at 1733637021106 (+2 ms)Closed at 1733637021106 2024-12-08T05:50:21,107 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:21,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:21,107 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:21,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:21,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741830_1006 (size=27973) 2024-12-08T05:50:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36757 is added to blk_1073741830_1006 (size=27973) 2024-12-08T05:50:21,111 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:50:21,111 INFO [M:0;0d942cb2025d:39457 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:50:21,111 INFO [M:0;0d942cb2025d:39457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39457 2024-12-08T05:50:21,112 INFO [M:0;0d942cb2025d:39457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:50:21,213 INFO [M:0;0d942cb2025d:39457 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:50:21,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:21,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39457-0x101909e0e880000, quorum=127.0.0.1:49643, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:21,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78be0d39{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:21,221 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:21,221 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:21,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:21,221 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:21,224 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:50:21,224 WARN [BP-1789351033-172.17.0.2-1733636922461 heartbeating to localhost/127.0.0.1:45577 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:21,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:21,224 WARN [BP-1789351033-172.17.0.2-1733636922461 heartbeating to localhost/127.0.0.1:45577 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1789351033-172.17.0.2-1733636922461 (Datanode Uuid 6ccd3602-0229-4e3b-96de-8d70753dc2d3) service to localhost/127.0.0.1:45577 2024-12-08T05:50:21,225 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data3/current/BP-1789351033-172.17.0.2-1733636922461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:21,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data4/current/BP-1789351033-172.17.0.2-1733636922461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:21,226 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:21,231 WARN [BP-1789351033-172.17.0.2-1733636922461 heartbeating to localhost/127.0.0.1:45577 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1789351033-172.17.0.2-1733636922461 (Datanode Uuid be95e663-f2a9-4e51-ba7d-6dde5fb2ed73) service to localhost/127.0.0.1:45577 2024-12-08T05:50:21,232 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data1/current/BP-1789351033-172.17.0.2-1733636922461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:21,232 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/cluster_91652ab4-b278-fb5a-2747-de5bc494f0d9/data/data2/current/BP-1789351033-172.17.0.2-1733636922461 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:21,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32c41a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:21,236 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:21,236 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:21,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:21,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:21,239 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:21,251 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f961078{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:50:21,251 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:21,252 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:21,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:21,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:21,263 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:50:21,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:50:21,310 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45577 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45577 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45577 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45577 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45577 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: master/0d942cb2025d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45577 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3aaca39e java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/0d942cb2025d:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/0d942cb2025d:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=172 (was 271), ProcessCount=11 (was 12), AvailableMemoryMB=8446 (was 8549) 2024-12-08T05:50:21,318 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=172, ProcessCount=11, AvailableMemoryMB=8445 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.log.dir so I do NOT create it in target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ad4fbbec-2e5f-4815-8593-c0e716e97e99/hadoop.tmp.dir so I do NOT create it in target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5, deleteOnExit=true 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:50:21,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/test.cache.data in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:50:21,320 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:50:21,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:50:21,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:50:21,335 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:50:21,412 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:21,420 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:21,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:21,421 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:21,421 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:21,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:21,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@166c3234{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:21,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ced705f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:21,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65adc0b2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/java.io.tmpdir/jetty-localhost-37853-hadoop-hdfs-3_4_1-tests_jar-_-any-16342189193196441579/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:50:21,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55f1c948{HTTP/1.1, (http/1.1)}{localhost:37853} 2024-12-08T05:50:21,541 INFO [Time-limited test {}] server.Server(415): Started @101725ms 2024-12-08T05:50:21,555 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:50:21,632 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:21,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:21,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:21,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:21,637 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:21,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c686af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:21,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@144bd696{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:21,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b4287c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/java.io.tmpdir/jetty-localhost-34937-hadoop-hdfs-3_4_1-tests_jar-_-any-16868712104938054816/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:21,753 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68e0715a{HTTP/1.1, (http/1.1)}{localhost:34937} 2024-12-08T05:50:21,753 INFO [Time-limited test {}] server.Server(415): Started @101937ms 2024-12-08T05:50:21,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:50:21,796 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:21,800 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:21,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:21,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:21,801 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:21,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58e3ba2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:21,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57c7b86a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:21,868 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data1/current/BP-2103863792-172.17.0.2-1733637021354/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:21,868 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data2/current/BP-2103863792-172.17.0.2-1733637021354/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:21,886 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:50:21,889 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77af4f353d44414c with lease ID 0x69478177fbd8b5b9: Processing first storage report for DS-332787a7-1c5f-4d64-b07a-5fae0b1a770f from datanode DatanodeRegistration(127.0.0.1:38399, datanodeUuid=99676bc3-7d96-4cbc-ac91-38a34514767d, infoPort=37927, infoSecurePort=0, ipcPort=37871, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354) 2024-12-08T05:50:21,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77af4f353d44414c with lease ID 0x69478177fbd8b5b9: from storage DS-332787a7-1c5f-4d64-b07a-5fae0b1a770f node DatanodeRegistration(127.0.0.1:38399, datanodeUuid=99676bc3-7d96-4cbc-ac91-38a34514767d, infoPort=37927, infoSecurePort=0, ipcPort=37871, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:21,889 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77af4f353d44414c with lease ID 0x69478177fbd8b5b9: Processing first storage report for DS-aacfec95-7a61-4649-9786-39351996349e from datanode DatanodeRegistration(127.0.0.1:38399, datanodeUuid=99676bc3-7d96-4cbc-ac91-38a34514767d, infoPort=37927, infoSecurePort=0, ipcPort=37871, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354) 2024-12-08T05:50:21,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77af4f353d44414c with lease ID 0x69478177fbd8b5b9: from storage DS-aacfec95-7a61-4649-9786-39351996349e node DatanodeRegistration(127.0.0.1:38399, datanodeUuid=99676bc3-7d96-4cbc-ac91-38a34514767d, infoPort=37927, infoSecurePort=0, ipcPort=37871, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:21,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3704b2b9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/java.io.tmpdir/jetty-localhost-44131-hadoop-hdfs-3_4_1-tests_jar-_-any-13740332922172934068/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:21,920 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1db721bf{HTTP/1.1, (http/1.1)}{localhost:44131} 2024-12-08T05:50:21,920 INFO [Time-limited test {}] server.Server(415): Started @102104ms 2024-12-08T05:50:21,921 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
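The DatanodeManager(468) warning that appears above is a pure configuration check: HDFS derives the heartbeat expire interval from two other settings and complains when the stale-datanode interval exceeds it. Below is a minimal sketch of that arithmetic using the values visible in this run; the 5000 ms recheck interval and 1 s heartbeat are inferred so that the expiry works out to 20000 ms (they are not shown in the log), and the property names in the comments are the standard HDFS keys.

// Sketch of the check behind "interval for marking stale datanode = 30000 ... larger than
// heartbeat expire interval 20000". Values are taken or inferred from this log run, not read
// from a live configuration.
public class StaleIntervalCheck {
  public static void main(String[] args) {
    long staleIntervalMs = 30_000L;       // dfs.namenode.stale.datanode.interval (default)
    long recheckIntervalMs = 5_000L;      // dfs.namenode.heartbeat.recheck-interval (inferred)
    long heartbeatIntervalSec = 1L;       // dfs.heartbeat.interval, in seconds (inferred)
    // DatanodeManager computes: expire = 2 * recheck + 10 * 1000 * heartbeatSeconds
    long heartbeatExpireMs = 2 * recheckIntervalMs + 10 * 1000 * heartbeatIntervalSec;
    if (staleIntervalMs > heartbeatExpireMs) {
      System.out.printf("stale interval %d ms is larger than heartbeat expire interval %d ms%n",
          staleIntervalMs, heartbeatExpireMs);
    }
  }
}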
2024-12-08T05:50:22,023 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data3/current/BP-2103863792-172.17.0.2-1733637021354/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:22,023 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data4/current/BP-2103863792-172.17.0.2-1733637021354/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:22,040 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:50:22,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5f04e0d22aac49 with lease ID 0x69478177fbd8b5ba: Processing first storage report for DS-03ea4ad4-c145-47ae-bb91-6253be5dc961 from datanode DatanodeRegistration(127.0.0.1:39311, datanodeUuid=8fe6da25-b68a-40a0-8fa4-74331eedf377, infoPort=34335, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354) 2024-12-08T05:50:22,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5f04e0d22aac49 with lease ID 0x69478177fbd8b5ba: from storage DS-03ea4ad4-c145-47ae-bb91-6253be5dc961 node DatanodeRegistration(127.0.0.1:39311, datanodeUuid=8fe6da25-b68a-40a0-8fa4-74331eedf377, infoPort=34335, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:22,043 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5f04e0d22aac49 with lease ID 0x69478177fbd8b5ba: Processing first storage report for DS-4ee1241e-3ac9-4d0b-a07a-a98cb1b62911 from datanode DatanodeRegistration(127.0.0.1:39311, datanodeUuid=8fe6da25-b68a-40a0-8fa4-74331eedf377, infoPort=34335, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354) 2024-12-08T05:50:22,043 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5f04e0d22aac49 with lease ID 0x69478177fbd8b5ba: from storage DS-4ee1241e-3ac9-4d0b-a07a-a98cb1b62911 node DatanodeRegistration(127.0.0.1:39311, datanodeUuid=8fe6da25-b68a-40a0-8fa4-74331eedf377, infoPort=34335, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1996714429;c=1733637021354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:22,049 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea 2024-12-08T05:50:22,052 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/zookeeper_0, clientPort=56184, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:50:22,053 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56184 2024-12-08T05:50:22,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:50:22,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:50:22,066 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba with version=8 2024-12-08T05:50:22,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:50:22,068 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:50:22,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,068 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:50:22,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:50:22,069 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:50:22,069 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:50:22,069 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46229 2024-12-08T05:50:22,071 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46229 connecting to ZooKeeper ensemble=127.0.0.1:56184 2024-12-08T05:50:22,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462290x0, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:50:22,076 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46229-0x101909f8bf30000 connected 2024-12-08T05:50:22,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,092 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,095 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:22,095 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba, hbase.cluster.distributed=false 2024-12-08T05:50:22,097 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:50:22,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46229 2024-12-08T05:50:22,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46229 2024-12-08T05:50:22,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46229 2024-12-08T05:50:22,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46229 2024-12-08T05:50:22,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46229 2024-12-08T05:50:22,116 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:50:22,117 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:50:22,118 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45483 2024-12-08T05:50:22,120 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45483 connecting to ZooKeeper ensemble=127.0.0.1:56184 2024-12-08T05:50:22,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454830x0, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:50:22,131 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454830x0, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:22,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45483-0x101909f8bf30001 connected 2024-12-08T05:50:22,131 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:50:22,132 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:50:22,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:50:22,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:50:22,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45483 2024-12-08T05:50:22,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45483 2024-12-08T05:50:22,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45483 2024-12-08T05:50:22,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45483 2024-12-08T05:50:22,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45483 2024-12-08T05:50:22,153 
DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:46229 2024-12-08T05:50:22,154 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:22,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:22,156 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:50:22,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,158 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:50:22,159 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,46229,1733637022068 from backup master directory 2024-12-08T05:50:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:22,161 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
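The ZKUtil(113) and ZKWatcher(609) lines above show the coordination pattern used by the master and region server: a watch is registered on a znode that may not exist yet, and the NodeCreated/NodeChildrenChanged events logged afterwards are those watches firing. A small sketch of the same pattern with the plain ZooKeeper client follows; the quorum address and znode path are taken from this log, while the session timeout and the standalone class are illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of "Set watcher on znode that does not yet exist": exists() returns null for a
// missing znode but still leaves a watch behind, so a later create triggers a NodeCreated
// event on the watcher passed to the client.
public class MasterZNodeWatch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    // Quorum taken from the log; the 30 s session timeout is an illustrative choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56184", 30_000, watcher);
    // true = register the default (constructor) watcher on this path.
    if (zk.exists("/hbase/master", true) == null) {
      System.out.println("Set watcher on znode that does not yet exist, /hbase/master");
    }
    Thread.sleep(5_000);  // give the watcher a moment to fire in this toy example
    zk.close();
  }
}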
2024-12-08T05:50:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:22,161 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,170 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/hbase.id] with ID: eee9caec-0b9b-49b9-90ea-83e03a821c6b 2024-12-08T05:50:22,170 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/.tmp/hbase.id 2024-12-08T05:50:22,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:50:22,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:50:22,180 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/.tmp/hbase.id]:[hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/hbase.id] 2024-12-08T05:50:22,196 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:22,196 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:50:22,199 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
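The FSUtils(620)/(625)/(634) sequence above writes the new cluster ID to a temporary location and then moves it into place, so a concurrently starting process never reads a partially written hbase.id. A rough sketch of that write-then-rename step with the Hadoop FileSystem API is shown below; the root directory is an illustrative stand-in for the test rootdir and error handling is reduced to a bare check.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the temp-then-rename pattern: write the ID under .tmp, then rename it to the
// final hbase.id path so the file appears atomically.
public class WriteClusterId {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = new Path("hdfs://localhost:44287/user/jenkins/test-data/example-root"); // stand-in
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {   // the rename is the "move to its target location" step
      throw new IllegalStateException("Could not move " + tmp + " to " + target);
    }
  }
}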
2024-12-08T05:50:22,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:50:22,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:50:22,213 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:50:22,214 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:50:22,214 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:50:22,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:50:22,223 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store 2024-12-08T05:50:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:50:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:50:22,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:22,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:50:22,231 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:22,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:22,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:50:22,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:22,231 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
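The long descriptor dumped above for the local 'master:store' region (column families info, proc, rs and state) maps directly onto the client-side builder API. Below is a hedged sketch of a descriptor with the same shape; only the 'info' and 'proc' families are spelled out, and the table name is an illustrative stand-in rather than the real master-local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a table descriptor shaped like the logged 'master:store' descriptor; attribute
// values mirror the log entry above, the table name is hypothetical.
public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)                             // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
  }
}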
2024-12-08T05:50:22,232 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637022231Disabling compacts and flushes for region at 1733637022231Disabling writes for close at 1733637022231Writing region close event to WAL at 1733637022231Closed at 1733637022231 2024-12-08T05:50:22,233 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/.initializing 2024-12-08T05:50:22,233 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/WALs/0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C46229%2C1733637022068, suffix=, logDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/WALs/0d942cb2025d,46229,1733637022068, archiveDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/oldWALs, maxLogs=10 2024-12-08T05:50:22,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C46229%2C1733637022068.1733637022237 2024-12-08T05:50:22,245 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/WALs/0d942cb2025d,46229,1733637022068/0d942cb2025d%2C46229%2C1733637022068.1733637022237 2024-12-08T05:50:22,248 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34335:34335),(127.0.0.1/127.0.0.1:37927:37927)] 2024-12-08T05:50:22,249 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:50:22,249 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:22,249 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,249 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:50:22,253 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:50:22,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:22,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,259 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:50:22,260 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:22,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:50:22,262 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:22,262 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,263 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,264 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,266 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,266 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,266 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:50:22,268 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:22,270 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:50:22,271 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808176, jitterRate=0.0276491641998291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:50:22,272 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637022250Initializing all the Stores at 1733637022251 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022251Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637022251Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637022251Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637022251Cleaning up temporary data from old regions at 1733637022266 (+15 ms)Region opened successfully at 1733637022272 (+6 ms) 2024-12-08T05:50:22,273 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:50:22,277 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad0f58b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:50:22,278 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:50:22,278 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:50:22,278 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:50:22,278 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:50:22,279 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:50:22,280 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:50:22,280 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:50:22,282 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:50:22,283 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:50:22,284 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:50:22,285 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:50:22,286 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:50:22,287 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:50:22,287 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:50:22,288 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:50:22,290 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:50:22,291 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:50:22,292 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:50:22,294 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:50:22,296 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:50:22,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:22,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:22,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,298 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,46229,1733637022068, sessionid=0x101909f8bf30000, setting cluster-up flag (Was=false) 2024-12-08T05:50:22,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,308 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:50:22,309 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,318 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:50:22,319 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,46229,1733637022068 2024-12-08T05:50:22,320 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:50:22,322 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:22,322 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:50:22,322 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:50:22,322 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,46229,1733637022068 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:50:22,324 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637052326 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:50:22,326 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,326 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:22,327 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:50:22,327 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:50:22,327 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:50:22,327 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:50:22,327 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:50:22,327 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:50:22,328 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637022327,5,FailOnTimeoutGroup] 2024-12-08T05:50:22,328 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637022328,5,FailOnTimeoutGroup] 2024-12-08T05:50:22,328 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,328 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:50:22,328 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,328 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,328 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,328 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:50:22,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:50:22,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:50:22,337 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:50:22,337 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba 2024-12-08T05:50:22,342 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(746): ClusterId : eee9caec-0b9b-49b9-90ea-83e03a821c6b 2024-12-08T05:50:22,342 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:50:22,345 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:50:22,345 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:50:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:50:22,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:50:22,346 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:22,348 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:50:22,348 DEBUG [RS:0;0d942cb2025d:45483 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2873c505, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:50:22,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:50:22,350 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:50:22,350 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,351 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,351 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:50:22,352 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:50:22,352 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:50:22,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:50:22,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:50:22,357 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:50:22,357 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,358 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:50:22,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740 2024-12-08T05:50:22,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740 2024-12-08T05:50:22,360 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:50:22,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:50:22,361 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
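The hbase:meta descriptor printed above (families info/ns/rep_barrier/table, ROWCOL bloom filters, ROW_INDEX_V1 encoding, an explicit 'hbase.store.file-tracker.impl' value) is assembled from the same public client builders available to user tables. A minimal sketch, assuming the stock HBase client API; the class name and the table name "demo" are hypothetical, and the two setValue keys are the ones quoted verbatim in the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Column family shaped like the 'info' family logged above:
    // ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks, 3 versions.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))          // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .setMaxVersions(3)
            .build())
        // Store file tracker choice, as recorded in the meta descriptor above.
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        // Per-family flush lower bound; when it is absent, HBase falls back to
        // memstore flush size / number of families, as the FlushLargeStoresPolicy line notes.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
        .build();
    System.out.println(td);
  }
}

Values not set explicitly fall back to the defaults the log echoes, which is exactly what the "No hbase.hregion.percolumnfamilyflush.size.lower.bound set ... using region.getMemStoreFlushHeapSize/# of families" message reports.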
2024-12-08T05:50:22,362 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:50:22,363 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:45483 2024-12-08T05:50:22,363 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:50:22,363 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:50:22,363 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T05:50:22,364 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,46229,1733637022068 with port=45483, startcode=1733637022116 2024-12-08T05:50:22,364 DEBUG [RS:0;0d942cb2025d:45483 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:50:22,365 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:50:22,366 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874877, jitterRate=0.11246415972709656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:50:22,367 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:50:22,367 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637022346Initializing all the Stores at 1733637022348 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022348Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022348Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637022348Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1733637022348Cleaning up temporary data from old regions at 1733637022361 (+13 ms)Region opened successfully at 1733637022367 (+6 ms) 2024-12-08T05:50:22,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:50:22,368 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:50:22,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:50:22,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:50:22,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:50:22,368 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,368 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46229 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,368 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:22,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637022368Disabling compacts and flushes for region at 1733637022368Disabling writes for close at 1733637022368Writing region close event to WAL at 1733637022368Closed at 1733637022368 2024-12-08T05:50:22,370 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:22,370 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:50:22,370 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba 2024-12-08T05:50:22,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:50:22,370 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44287 2024-12-08T05:50:22,370 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:50:22,372 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:50:22,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:50:22,373 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:50:22,374 DEBUG [RS:0;0d942cb2025d:45483 {}] 
zookeeper.ZKUtil(111): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,374 WARN [RS:0;0d942cb2025d:45483 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:50:22,374 INFO [RS:0;0d942cb2025d:45483 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:22,374 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/WALs/0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,380 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,45483,1733637022116] 2024-12-08T05:50:22,383 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:50:22,386 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:50:22,388 INFO [RS:0;0d942cb2025d:45483 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:50:22,388 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,392 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:50:22,393 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:50:22,393 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
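The region server above instantiates an FSHLogProvider-backed WAL, and a later entry reports its parameters as blocksize=256 MB, rollsize=128 MB, maxLogs=32. Those numbers are driven by configuration; a minimal sketch of the corresponding keys, with names as commonly documented for recent HBase releases (verify against the 4.0.0-alpha snapshot in use), the class name being hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Filesystem-based provider (FSHLog), as instantiated by WALFactory in the log.
    conf.set("hbase.wal.provider", "filesystem");
    // WAL block size and the fraction of it at which a roll is requested;
    // 256 MB * 0.5 matches the blocksize/rollsize pair printed in the log.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on the number of retained WAL files before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    System.out.println(conf.get("hbase.wal.provider"));
  }
}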
2024-12-08T05:50:22,393 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,393 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,393 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:22,394 DEBUG [RS:0;0d942cb2025d:45483 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:22,394 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,395 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,395 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,395 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-08T05:50:22,395 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,395 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45483,1733637022116-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:50:22,411 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:50:22,411 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45483,1733637022116-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,411 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,411 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.Replication(171): 0d942cb2025d,45483,1733637022116 started 2024-12-08T05:50:22,427 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:22,427 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,45483,1733637022116, RpcServer on 0d942cb2025d/172.17.0.2:45483, sessionid=0x101909f8bf30001 2024-12-08T05:50:22,427 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:50:22,427 DEBUG [RS:0;0d942cb2025d:45483 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,427 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45483,1733637022116' 2024-12-08T05:50:22,427 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:50:22,428 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:50:22,429 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:50:22,429 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:50:22,429 DEBUG [RS:0;0d942cb2025d:45483 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,429 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45483,1733637022116' 2024-12-08T05:50:22,429 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:50:22,430 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:50:22,430 DEBUG [RS:0;0d942cb2025d:45483 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:50:22,430 INFO [RS:0;0d942cb2025d:45483 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:50:22,430 INFO [RS:0;0d942cb2025d:45483 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-08T05:50:22,524 WARN [0d942cb2025d:46229 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T05:50:22,533 INFO [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C45483%2C1733637022116, suffix=, logDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/WALs/0d942cb2025d,45483,1733637022116, archiveDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/oldWALs, maxLogs=32 2024-12-08T05:50:22,535 INFO [RS:0;0d942cb2025d:45483 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C45483%2C1733637022116.1733637022535 2024-12-08T05:50:22,542 INFO [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/WALs/0d942cb2025d,45483,1733637022116/0d942cb2025d%2C45483%2C1733637022116.1733637022535 2024-12-08T05:50:22,543 DEBUG [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37927:37927),(127.0.0.1/127.0.0.1:34335:34335)] 2024-12-08T05:50:22,774 DEBUG [0d942cb2025d:46229 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:50:22,775 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,776 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,45483,1733637022116, state=OPENING 2024-12-08T05:50:22,778 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:50:22,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:22,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:22,780 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:50:22,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:22,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,45483,1733637022116}] 2024-12-08T05:50:22,934 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:50:22,936 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58741, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:50:22,941 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:50:22,941 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:22,943 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C45483%2C1733637022116.meta, suffix=.meta, logDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/WALs/0d942cb2025d,45483,1733637022116, archiveDir=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/oldWALs, maxLogs=32 2024-12-08T05:50:22,945 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C45483%2C1733637022116.meta.1733637022945.meta 2024-12-08T05:50:22,951 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/WALs/0d942cb2025d,45483,1733637022116/0d942cb2025d%2C45483%2C1733637022116.meta.1733637022945.meta 2024-12-08T05:50:22,954 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34335:34335),(127.0.0.1/127.0.0.1:37927:37927)] 2024-12-08T05:50:22,955 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:50:22,956 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
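The meta descriptor carries coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', and the open path above loads that class at priority 536870911. A table-level coprocessor entry of the same shape can be declared through the client builders; a minimal sketch with a hypothetical table name:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorSketch {
  public static void main(String[] args) throws IOException {
    // Attach the same endpoint the meta descriptor carries, at the priority
    // printed in the log (536870911).
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))   // hypothetical table
        .setCoprocessor(CoprocessorDescriptorBuilder
            .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setPriority(536870911)
            .build())
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}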
2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:50:22,956 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:50:22,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:50:22,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:50:22,961 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:50:22,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:50:22,963 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:50:22,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:50:22,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:22,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:50:22,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:50:22,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:22,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
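The repeated CompactionConfiguration lines (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) echo the compaction tunables in effect for each store. A minimal sketch of the keys behind those values, with names as commonly documented (verify against the running version); the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // File-count window: compact between 3 and 10 files at a time.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratios reported above: 1.2 normally, 5.0 off-peak.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compactions every 7 days (604800000 ms) with 50% jitter.
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}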
2024-12-08T05:50:22,967 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:50:22,968 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740 2024-12-08T05:50:22,969 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740 2024-12-08T05:50:22,970 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:50:22,970 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:50:22,971 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:50:22,973 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:50:22,974 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689458, jitterRate=-0.12330956757068634}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:50:22,974 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:50:22,976 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637022957Writing region info on filesystem at 1733637022957Initializing all the Stores at 1733637022958 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022958Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022960 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637022960Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637022960Cleaning up temporary data from old regions at 1733637022970 (+10 ms)Running coprocessor post-open hooks at 1733637022974 (+4 ms)Region opened successfully at 1733637022976 (+2 ms) 2024-12-08T05:50:22,977 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637022934 2024-12-08T05:50:22,981 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:50:22,981 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:50:22,982 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,984 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,45483,1733637022116, state=OPEN 2024-12-08T05:50:22,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:50:22,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:50:22,989 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,45483,1733637022116 2024-12-08T05:50:22,989 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:22,989 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:22,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:50:22,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,45483,1733637022116 in 209 msec 2024-12-08T05:50:22,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:50:22,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 622 msec 2024-12-08T05:50:22,996 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:22,996 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:50:22,998 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:50:22,998 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,45483,1733637022116, seqNum=-1] 2024-12-08T05:50:22,998 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:50:23,000 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35337, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:50:23,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 684 msec 2024-12-08T05:50:23,006 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637023006, completionTime=-1 2024-12-08T05:50:23,007 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:50:23,007 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637083009 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637143009 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:23,009 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:23,010 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:46229, period=300000, unit=MILLISECONDS is enabled. 
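InitMetaProcedure reports that it is creating the 'default' and 'hbase' namespaces. User namespaces are created in the same spirit through the Admin API; a minimal sketch, assuming a reachable cluster and using a hypothetical namespace name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // User namespaces are created the same way InitMetaProcedure creates
      // 'default' and 'hbase' internally; the name below is hypothetical.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}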
2024-12-08T05:50:23,010 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:23,010 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:23,012 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.854sec 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:50:23,015 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:50:23,016 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:50:23,018 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:50:23,018 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:50:23,019 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,46229,1733637022068-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
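Both the master and, earlier, the region server report "Quota support disabled". That behaviour is governed by a single switch which must be set cluster-wide before startup; a minimal sketch, assuming the standard configuration key, with a hypothetical class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Off by default, which is why MasterQuotaManager and
    // RegionServerRpcQuotaManager both log "Quota support disabled" above.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}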
2024-12-08T05:50:23,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53b08c9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:23,043 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,46229,-1 for getting cluster id 2024-12-08T05:50:23,043 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:50:23,045 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'eee9caec-0b9b-49b9-90ea-83e03a821c6b' 2024-12-08T05:50:23,046 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:50:23,046 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "eee9caec-0b9b-49b9-90ea-83e03a821c6b" 2024-12-08T05:50:23,047 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@358cc871, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:23,047 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,46229,-1] 2024-12-08T05:50:23,047 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:50:23,048 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,049 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:50:23,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d5e0d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:23,051 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:50:23,052 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,45483,1733637022116, seqNum=-1] 2024-12-08T05:50:23,052 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:50:23,054 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:50:23,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,46229,1733637022068 2024-12-08T05:50:23,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:23,060 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:50:23,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:50:23,060 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:50:23,061 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:50:23,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,061 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:50:23,061 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:50:23,061 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=599502189, stopped=false 2024-12-08T05:50:23,062 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,46229,1733637022068 2024-12-08T05:50:23,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:23,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:23,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:23,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:23,063 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:50:23,064 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:23,064 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:23,064 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
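[Editor's note] The long "Call stack: at ..." DEBUG records above and below are not exceptions; AsyncConnectionImpl(264) simply records which caller closed the connection by capturing the current thread's stack. A minimal JDK-only sketch of that diagnostic pattern (illustrative, not the actual HBase implementation):

    import java.util.Arrays;
    import java.util.stream.Collectors;

    // Capture and join the current call stack, similar in spirit to the
    // "Call stack: at ..." DEBUG lines emitted when the async connection closes.
    public final class StackDump {
      public static String currentStack() {
        return Arrays.stream(Thread.currentThread().getStackTrace())
            .skip(1) // drop the getStackTrace frame itself
            .map(frame -> "at " + frame)
            .collect(Collectors.joining(" "));
      }

      public static void main(String[] args) {
        System.out.println("Call stack: " + currentStack());
      }
    }
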
2024-12-08T05:50:23,064 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:50:23,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,065 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,45483,1733637022116' ***** 2024-12-08T05:50:23,065 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:50:23,065 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:50:23,065 INFO [RS:0;0d942cb2025d:45483 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:50:23,065 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:50:23,065 INFO [RS:0;0d942cb2025d:45483 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:50:23,065 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,45483,1733637022116 2024-12-08T05:50:23,065 INFO [RS:0;0d942cb2025d:45483 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:45483. 2024-12-08T05:50:23,066 DEBUG [RS:0;0d942cb2025d:45483 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:50:23,066 DEBUG [RS:0;0d942cb2025d:45483 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
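[Editor's note] Both shutdown call stacks lead back to AbstractTestLogRolling.tearDown, i.e. the ordinary JUnit teardown that closes the shared connection and stops the minicluster. A hedged sketch of that shape, using only the HBaseTestingUtil methods named in this log (startMiniCluster / shutdownMiniCluster); the class, field, and test names below are illustrative, not the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Illustrative shape of a minicluster-backed test like TestLogRolling:
    // start the cluster before the test, always shut it down afterwards so the
    // master, region server, datanodes and ZooKeeper seen above get torn down.
    // (Later in this log the test restarts the cluster with
    // StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2}.)
    public class ExampleMiniClusterTest {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster(); // "Minicluster is up; activeMaster=..."
      }

      @After
      public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster(); // produces the shutdown sequence logged here
      }

      @Test
      public void testSomething() throws Exception {
        // test body would go here
      }
    }
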
2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:50:23,066 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T05:50:23,066 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:50:23,066 DEBUG [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T05:50:23,066 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:50:23,066 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:50:23,067 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:50:23,067 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:50:23,067 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:50:23,067 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T05:50:23,085 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/.tmp/ns/131e820f69c940878ff86ce4cc76b47c is 43, key is default/ns:d/1733637023000/Put/seqid=0 2024-12-08T05:50:23,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741835_1011 (size=5153) 2024-12-08T05:50:23,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741835_1011 (size=5153) 2024-12-08T05:50:23,091 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/.tmp/ns/131e820f69c940878ff86ce4cc76b47c 2024-12-08T05:50:23,098 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/.tmp/ns/131e820f69c940878ff86ce4cc76b47c as hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/ns/131e820f69c940878ff86ce4cc76b47c 2024-12-08T05:50:23,105 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/ns/131e820f69c940878ff86ce4cc76b47c, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T05:50:23,106 INFO 
[RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-12-08T05:50:23,111 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T05:50:23,111 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:50:23,112 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:23,112 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637023066Running coprocessor pre-close hooks at 1733637023066Disabling compacts and flushes for region at 1733637023066Disabling writes for close at 1733637023067 (+1 ms)Obtaining lock to block concurrent updates at 1733637023067Preparing flush snapshotting stores in 1588230740 at 1733637023067Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733637023067Flushing stores of hbase:meta,,1.1588230740 at 1733637023068 (+1 ms)Flushing 1588230740/ns: creating writer at 1733637023068Flushing 1588230740/ns: appending metadata at 1733637023084 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733637023084Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@493f51d9: reopening flushed file at 1733637023098 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1733637023106 (+8 ms)Writing region close event to WAL at 1733637023107 (+1 ms)Running coprocessor post-close hooks at 1733637023111 (+4 ms)Closed at 1733637023112 (+1 ms) 2024-12-08T05:50:23,112 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:23,267 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,45483,1733637022116; all regions closed. 
2024-12-08T05:50:23,267 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,268 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,268 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741834_1010 (size=1152) 2024-12-08T05:50:23,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741834_1010 (size=1152) 2024-12-08T05:50:23,273 DEBUG [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/oldWALs 2024-12-08T05:50:23,274 INFO [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C45483%2C1733637022116.meta:.meta(num 1733637022945) 2024-12-08T05:50:23,274 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,274 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,274 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,274 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,274 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741833_1009 (size=93) 2024-12-08T05:50:23,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741833_1009 (size=93) 2024-12-08T05:50:23,279 DEBUG [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/oldWALs 2024-12-08T05:50:23,279 INFO [RS:0;0d942cb2025d:45483 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C45483%2C1733637022116:(num 1733637022535) 2024-12-08T05:50:23,279 DEBUG [RS:0;0d942cb2025d:45483 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:23,279 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:50:23,279 INFO [RS:0;0d942cb2025d:45483 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:50:23,279 INFO [RS:0;0d942cb2025d:45483 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:50:23,280 INFO [RS:0;0d942cb2025d:45483 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:50:23,280 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:50:23,280 INFO [RS:0;0d942cb2025d:45483 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45483 2024-12-08T05:50:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:50:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,45483,1733637022116 2024-12-08T05:50:23,282 INFO [RS:0;0d942cb2025d:45483 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:50:23,283 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,45483,1733637022116] 2024-12-08T05:50:23,285 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,45483,1733637022116 already deleted, retry=false 2024-12-08T05:50:23,285 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,45483,1733637022116 expired; onlineServers=0 2024-12-08T05:50:23,285 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,46229,1733637022068' ***** 2024-12-08T05:50:23,285 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:50:23,285 INFO [M:0;0d942cb2025d:46229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:50:23,285 INFO [M:0;0d942cb2025d:46229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:50:23,285 DEBUG [M:0;0d942cb2025d:46229 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:50:23,285 DEBUG [M:0;0d942cb2025d:46229 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:50:23,285 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:50:23,285 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637022327 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637022327,5,FailOnTimeoutGroup] 2024-12-08T05:50:23,285 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637022328 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637022328,5,FailOnTimeoutGroup] 2024-12-08T05:50:23,285 INFO [M:0;0d942cb2025d:46229 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:50:23,285 INFO [M:0;0d942cb2025d:46229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:50:23,286 DEBUG [M:0;0d942cb2025d:46229 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:50:23,286 INFO [M:0;0d942cb2025d:46229 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:50:23,286 INFO [M:0;0d942cb2025d:46229 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:50:23,286 INFO [M:0;0d942cb2025d:46229 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:50:23,286 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:50:23,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:50:23,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:23,287 DEBUG [M:0;0d942cb2025d:46229 {}] zookeeper.ZKUtil(347): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:50:23,287 WARN [M:0;0d942cb2025d:46229 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:50:23,287 INFO [M:0;0d942cb2025d:46229 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/.lastflushedseqids 2024-12-08T05:50:23,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741836_1012 (size=99) 2024-12-08T05:50:23,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741836_1012 (size=99) 2024-12-08T05:50:23,294 INFO [M:0;0d942cb2025d:46229 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:50:23,294 INFO [M:0;0d942cb2025d:46229 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:50:23,294 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:50:23,294 INFO [M:0;0d942cb2025d:46229 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:23,294 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:23,295 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:50:23,295 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:23,295 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T05:50:23,313 DEBUG [M:0;0d942cb2025d:46229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e7ec07535b54c38a2ffd747600c7a5f is 82, key is hbase:meta,,1/info:regioninfo/1733637022982/Put/seqid=0 2024-12-08T05:50:23,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741837_1013 (size=5672) 2024-12-08T05:50:23,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741837_1013 (size=5672) 2024-12-08T05:50:23,319 INFO [M:0;0d942cb2025d:46229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e7ec07535b54c38a2ffd747600c7a5f 2024-12-08T05:50:23,342 DEBUG [M:0;0d942cb2025d:46229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/185b6e637f2549afb1fe45027c44fe78 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733637023005/Put/seqid=0 2024-12-08T05:50:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741838_1014 (size=5275) 2024-12-08T05:50:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741838_1014 (size=5275) 2024-12-08T05:50:23,349 INFO [M:0;0d942cb2025d:46229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/185b6e637f2549afb1fe45027c44fe78 2024-12-08T05:50:23,370 DEBUG [M:0;0d942cb2025d:46229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0867299eceda45459efd28a4a2caa16f is 69, key is 0d942cb2025d,45483,1733637022116/rs:state/1733637022368/Put/seqid=0 2024-12-08T05:50:23,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741839_1015 (size=5156) 2024-12-08T05:50:23,375 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741839_1015 (size=5156) 2024-12-08T05:50:23,375 INFO [M:0;0d942cb2025d:46229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0867299eceda45459efd28a4a2caa16f 2024-12-08T05:50:23,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:23,383 INFO [RS:0;0d942cb2025d:45483 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:50:23,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45483-0x101909f8bf30001, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:23,384 INFO [RS:0;0d942cb2025d:45483 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,45483,1733637022116; zookeeper connection closed. 2024-12-08T05:50:23,384 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44a8dc72 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44a8dc72 2024-12-08T05:50:23,384 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:50:23,397 DEBUG [M:0;0d942cb2025d:46229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/454e585adaaa4b4ebc6d658a16be3c82 is 52, key is load_balancer_on/state:d/1733637023059/Put/seqid=0 2024-12-08T05:50:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741840_1016 (size=5056) 2024-12-08T05:50:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741840_1016 (size=5056) 2024-12-08T05:50:23,402 INFO [M:0;0d942cb2025d:46229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/454e585adaaa4b4ebc6d658a16be3c82 2024-12-08T05:50:23,408 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e7ec07535b54c38a2ffd747600c7a5f as hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3e7ec07535b54c38a2ffd747600c7a5f 2024-12-08T05:50:23,414 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3e7ec07535b54c38a2ffd747600c7a5f, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T05:50:23,415 DEBUG [M:0;0d942cb2025d:46229 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/185b6e637f2549afb1fe45027c44fe78 as hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/185b6e637f2549afb1fe45027c44fe78 2024-12-08T05:50:23,420 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/185b6e637f2549afb1fe45027c44fe78, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T05:50:23,421 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0867299eceda45459efd28a4a2caa16f as hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0867299eceda45459efd28a4a2caa16f 2024-12-08T05:50:23,427 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0867299eceda45459efd28a4a2caa16f, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T05:50:23,428 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/454e585adaaa4b4ebc6d658a16be3c82 as hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/454e585adaaa4b4ebc6d658a16be3c82 2024-12-08T05:50:23,433 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44287/user/jenkins/test-data/302ecdcd-010d-8259-e5fa-f1da0c6eadba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/454e585adaaa4b4ebc6d658a16be3c82, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T05:50:23,434 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=29, compaction requested=false 2024-12-08T05:50:23,436 INFO [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:23,436 DEBUG [M:0;0d942cb2025d:46229 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637023294Disabling compacts and flushes for region at 1733637023294Disabling writes for close at 1733637023295 (+1 ms)Obtaining lock to block concurrent updates at 1733637023295Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637023295Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733637023295Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733637023296 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637023296Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637023312 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637023312Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637023326 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637023342 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637023342Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637023354 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637023369 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637023369Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637023381 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637023396 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637023396Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34c6ab84: reopening flushed file at 1733637023407 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a58a22e: reopening flushed file at 1733637023414 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@442b460: reopening flushed file at 1733637023420 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@101002e0: reopening flushed file at 1733637023427 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=29, compaction requested=false at 1733637023434 (+7 ms)Writing region close event to WAL at 1733637023436 (+2 ms)Closed at 1733637023436 2024-12-08T05:50:23,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,437 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,437 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,437 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:23,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38399 is added to blk_1073741830_1006 (size=10311) 2024-12-08T05:50:23,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39311 is added to blk_1073741830_1006 (size=10311) 2024-12-08T05:50:23,440 INFO [M:0;0d942cb2025d:46229 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:50:23,440 INFO [M:0;0d942cb2025d:46229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46229 2024-12-08T05:50:23,441 INFO [M:0;0d942cb2025d:46229 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:50:23,441 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:50:23,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:23,543 INFO [M:0;0d942cb2025d:46229 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:50:23,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46229-0x101909f8bf30000, quorum=127.0.0.1:56184, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:50:23,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3704b2b9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:23,548 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1db721bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:23,548 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:23,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57c7b86a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:23,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58e3ba2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:23,550 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:50:23,550 WARN [BP-2103863792-172.17.0.2-1733637021354 heartbeating to localhost/127.0.0.1:44287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:23,550 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:23,550 WARN [BP-2103863792-172.17.0.2-1733637021354 heartbeating to localhost/127.0.0.1:44287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2103863792-172.17.0.2-1733637021354 (Datanode Uuid 8fe6da25-b68a-40a0-8fa4-74331eedf377) service to localhost/127.0.0.1:44287 2024-12-08T05:50:23,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data3/current/BP-2103863792-172.17.0.2-1733637021354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:23,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data4/current/BP-2103863792-172.17.0.2-1733637021354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:23,551 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:23,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b4287c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:23,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68e0715a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:23,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:23,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@144bd696{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:23,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c686af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:23,561 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:50:23,561 WARN [BP-2103863792-172.17.0.2-1733637021354 heartbeating to localhost/127.0.0.1:44287 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:23,561 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:23,561 WARN [BP-2103863792-172.17.0.2-1733637021354 heartbeating to localhost/127.0.0.1:44287 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2103863792-172.17.0.2-1733637021354 (Datanode Uuid 99676bc3-7d96-4cbc-ac91-38a34514767d) service to localhost/127.0.0.1:44287 2024-12-08T05:50:23,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data1/current/BP-2103863792-172.17.0.2-1733637021354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:23,562 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/cluster_17cc2087-deb6-209d-14bf-6c02334f53a5/data/data2/current/BP-2103863792-172.17.0.2-1733637021354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:23,563 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:23,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65adc0b2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:50:23,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55f1c948{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:23,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:23,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ced705f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:23,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@166c3234{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:23,579 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:50:23,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:50:23,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.log.dir so I do NOT create it in target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e3c429d-07a8-c096-263c-b984a35e58ea/hadoop.tmp.dir so I do NOT create it in target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074, deleteOnExit=true 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/test.cache.data in system properties and HBase conf 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:50:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:50:23,597 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:50:23,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:50:23,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:50:23,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:50:23,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:50:23,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:50:23,613 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:50:23,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:23,718 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:23,720 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:23,720 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:23,720 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:50:23,725 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:23,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c83390f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:23,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e510a6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:23,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf66d4d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-42273-hadoop-hdfs-3_4_1-tests_jar-_-any-17781291790191999616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:50:23,872 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24f9a05e{HTTP/1.1, (http/1.1)}{localhost:42273} 2024-12-08T05:50:23,872 INFO [Time-limited test {}] server.Server(415): Started @104056ms 2024-12-08T05:50:23,893 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:50:23,971 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:23,974 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:23,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:23,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:23,975 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:23,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4475d850{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:23,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d9d8e2c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:24,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a12d6d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-45655-hadoop-hdfs-3_4_1-tests_jar-_-any-5158491388692305303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:24,092 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@441f58be{HTTP/1.1, (http/1.1)}{localhost:45655} 2024-12-08T05:50:24,092 INFO [Time-limited test {}] server.Server(415): Started @104276ms 2024-12-08T05:50:24,094 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:50:24,126 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:24,129 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:24,130 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:24,130 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:24,131 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:50:24,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ed0b53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:24,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@82e7b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:24,213 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data2/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:24,213 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data1/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:24,237 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:50:24,240 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550042790dfac4f5 with lease ID 0x7206f96d451258b6: Processing first storage report for DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a from datanode DatanodeRegistration(127.0.0.1:42141, datanodeUuid=4ee35e7b-40eb-46c8-a64d-d562f2b8508e, infoPort=36269, infoSecurePort=0, ipcPort=40781, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:24,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550042790dfac4f5 with lease ID 0x7206f96d451258b6: from storage DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a node DatanodeRegistration(127.0.0.1:42141, datanodeUuid=4ee35e7b-40eb-46c8-a64d-d562f2b8508e, infoPort=36269, infoSecurePort=0, ipcPort=40781, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:24,240 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550042790dfac4f5 with lease ID 0x7206f96d451258b6: Processing first storage report for DS-e7d5fd8d-ccd6-4a9d-aba8-757396a84769 from datanode DatanodeRegistration(127.0.0.1:42141, datanodeUuid=4ee35e7b-40eb-46c8-a64d-d562f2b8508e, infoPort=36269, infoSecurePort=0, ipcPort=40781, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:24,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550042790dfac4f5 with lease ID 0x7206f96d451258b6: from storage DS-e7d5fd8d-ccd6-4a9d-aba8-757396a84769 node DatanodeRegistration(127.0.0.1:42141, datanodeUuid=4ee35e7b-40eb-46c8-a64d-d562f2b8508e, infoPort=36269, infoSecurePort=0, ipcPort=40781, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:24,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2053fa2a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-41507-hadoop-hdfs-3_4_1-tests_jar-_-any-17479575772396282725/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:24,256 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b8a0b06{HTTP/1.1, (http/1.1)}{localhost:41507} 2024-12-08T05:50:24,256 INFO [Time-limited test {}] server.Server(415): Started @104440ms 2024-12-08T05:50:24,258 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
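The records above show HBaseTestingUtil redirecting every Hadoop/YARN/HDFS directory into the per-test target/test-data folder, then bringing up a mini HDFS cluster (NameNode plus two DataNodes, judging from the data1..data4 volumes) with Jetty UIs and first block reports. A minimal sketch of how a test typically reaches this state is below; the class name MiniDfsStartupSketch is invented, and the startMiniDFSCluster/shutdownMiniDFSCluster calls are the test-utility methods as remembered, so treat the exact signatures as an assumption rather than what this particular test does.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniDfsStartupSketch {
      // Owns the per-test data directory that all the dfs.*/yarn.* paths above point into.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Starts a NameNode and the requested number of DataNodes under target/test-data/<uuid>/.
        TEST_UTIL.startMiniDFSCluster(2);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Tears the mini HDFS cluster back down at the end of the test class.
        TEST_UTIL.shutdownMiniDFSCluster();
      }
    }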
2024-12-08T05:50:24,361 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data3/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:24,365 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data4/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:24,395 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:50:24,396 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:50:24,399 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x586fc600ee1d39d4 with lease ID 0x7206f96d451258b7: Processing first storage report for DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93 from datanode DatanodeRegistration(127.0.0.1:40747, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=44447, infoSecurePort=0, ipcPort=36345, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:24,399 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x586fc600ee1d39d4 with lease ID 0x7206f96d451258b7: from storage DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93 node DatanodeRegistration(127.0.0.1:40747, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=44447, infoSecurePort=0, ipcPort=36345, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:24,399 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x586fc600ee1d39d4 with lease ID 0x7206f96d451258b7: Processing first storage report for DS-aafb9aab-3773-41c8-8d30-a00f0165e9d6 from datanode DatanodeRegistration(127.0.0.1:40747, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=44447, infoSecurePort=0, ipcPort=36345, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:24,400 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x586fc600ee1d39d4 with lease ID 0x7206f96d451258b7: from storage DS-aafb9aab-3773-41c8-8d30-a00f0165e9d6 node DatanodeRegistration(127.0.0.1:40747, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=44447, infoSecurePort=0, ipcPort=36345, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:24,491 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794 2024-12-08T05:50:24,494 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/zookeeper_0, clientPort=49789, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:50:24,495 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49789 2024-12-08T05:50:24,496 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,497 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:50:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:50:24,509 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10 with version=8 2024-12-08T05:50:24,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:50:24,512 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:50:24,512 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:50:24,513 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40185 2024-12-08T05:50:24,514 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40185 connecting to ZooKeeper ensemble=127.0.0.1:49789 2024-12-08T05:50:24,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:401850x0, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:50:24,520 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40185-0x101909f957d0000 connected 2024-12-08T05:50:24,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,545 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:24,548 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10, hbase.cluster.distributed=false 2024-12-08T05:50:24,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:50:24,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40185 2024-12-08T05:50:24,553 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40185 2024-12-08T05:50:24,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40185 2024-12-08T05:50:24,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40185 2024-12-08T05:50:24,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40185 2024-12-08T05:50:24,575 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
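The RpcExecutor lines above report small handler pools (handlerCount=3 on the default FPBQ queue) and a read/write split on the priority RWQ queue (1 writer, 2 readers) for both the master and the region server. A hedged illustration of the configuration knobs that commonly produce numbers like these follows; the specific keys and values are an assumption about a typical low-footprint test configuration, not something read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      // Returns a Configuration with a deliberately small RPC footprint, roughly
      // matching the handlerCount=3 / 1-writer-2-reader split logged above.
      public static Configuration smallRpcFootprint() {
        Configuration conf = HBaseConfiguration.create();
        // Few handler threads keep a mini-cluster light.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // A non-zero read ratio is what turns the priority queue into an
        // RWQueueRpcExecutor with separate read and write queues.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
        return conf;
      }
    }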
2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:50:24,576 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:50:24,577 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36989 2024-12-08T05:50:24,579 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36989 connecting to ZooKeeper ensemble=127.0.0.1:49789 2024-12-08T05:50:24,580 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:24,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369890x0, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:50:24,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:50:24,589 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36989-0x101909f957d0001 connected 2024-12-08T05:50:24,589 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:50:24,592 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:50:24,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:50:24,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:50:24,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36989 2024-12-08T05:50:24,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36989 2024-12-08T05:50:24,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36989 2024-12-08T05:50:24,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36989 
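Several ZKUtil lines above read "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master: the region server registers a watch before the master has created those nodes, and is notified later when they appear. The sketch below shows the same pattern with the plain Apache ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the ensemble port is the per-test value from the log and should be read as a placeholder.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the test ensemble (port is per-test, placeholder here).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49789", 30_000, event -> { });

        // exists() registers a one-shot watch even when the znode is absent;
        // the watcher fires with NodeCreated once /hbase/running is written,
        // which is exactly the event sequence visible later in this log.
        zk.exists("/hbase/running", (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            System.out.println("cluster marked as running: " + event.getPath());
          }
        });
      }
    }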
2024-12-08T05:50:24,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36989 2024-12-08T05:50:24,614 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:40185 2024-12-08T05:50:24,614 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:24,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:24,616 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:50:24,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,620 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:50:24,620 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,40185,1733637024511 from backup master directory 2024-12-08T05:50:24,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:24,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:50:24,622 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
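The ActiveMasterManager records above show the two-step registration: the master first advertises itself under /hbase/backup-masters, then claims /hbase/master and deletes its backup entry. A simplified stand-in for that dance using the plain ZooKeeper client is sketched below; the paths and server name are copied from the log, while the ACL choice, error handling, and the helper itself are simplifying assumptions and not the actual ActiveMasterManager code.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ActiveMasterRegistrationSketch {
      static void register(ZooKeeper zk, byte[] serverName) throws Exception {
        String backup = "/hbase/backup-masters/0d942cb2025d,40185,1733637024511";
        // Advertise as a backup master first (ephemeral, so it vanishes on crash).
        zk.create(backup, serverName, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Claim the active-master znode...
        zk.create("/hbase/master", serverName, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // ...then remove the backup entry, as the "Deleting ZNode for ... from
        // backup master directory" line above reports.
        zk.delete(backup, -1);
      }
    }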
2024-12-08T05:50:24,622 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,40185,1733637024511
2024-12-08T05:50:24,632 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/hbase.id] with ID: c839cd3f-d143-4b00-858a-b5cfd50be2e6
2024-12-08T05:50:24,632 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/.tmp/hbase.id
2024-12-08T05:50:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741826_1002 (size=42)
2024-12-08T05:50:24,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741826_1002 (size=42)
2024-12-08T05:50:24,649 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/.tmp/hbase.id]:[hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/hbase.id]
2024-12-08T05:50:24,671 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-08T05:50:24,671 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-08T05:50:24,674 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
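The FSUtils records above write the cluster ID to .tmp/hbase.id and then move it into place, a standard write-to-temp-then-rename pattern that keeps readers from ever seeing a half-written file. Below is a generic sketch of that pattern with the Hadoop FileSystem API, assuming the root directory from the log; it is an illustration of the pattern, not the FSUtils implementation itself.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      // Write hbase.id under .tmp first, then rename into the final location.
      static void writeClusterId(Configuration conf, String clusterId) throws Exception {
        Path rootDir = new Path("hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10");
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // HDFS rename is atomic, so the target file appears fully written or not at all.
        fs.rename(tmp, target);
      }
    }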
2024-12-08T05:50:24,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:50:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:50:24,693 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:50:24,694 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:50:24,694 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:24,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:50:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:50:24,712 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store 2024-12-08T05:50:24,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:50:24,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:50:24,725 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:50:24,725 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
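The descriptor dumped above defines the local master:store table with four column families (info, proc, rs, state), each with its own versions, bloom filter, block size and encoding. For readers unfamiliar with that notation, the sketch below rebuilds two of those families with HBase's public builder API, copying the values from the log; it is only an equivalent illustration, not how MasterRegion constructs the descriptor internally.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      // Rebuilds the 'info' and 'proc' families of master:store as logged above.
      static TableDescriptor masterStoreLikeDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                  // VERSIONS => '3'
                .setInMemory(true)                                  // IN_MEMORY => 'true'
                .setBlocksize(8192)                                 // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                  // VERSIONS => '1'
                .setBlocksize(64 * 1024)                            // BLOCKSIZE => '65536 B (64KB)'
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
      }
    }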
2024-12-08T05:50:24,725 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637024725Disabling compacts and flushes for region at 1733637024725Disabling writes for close at 1733637024725Writing region close event to WAL at 1733637024725Closed at 1733637024725 2024-12-08T05:50:24,726 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/.initializing 2024-12-08T05:50:24,726 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,730 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C40185%2C1733637024511, suffix=, logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511, archiveDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/oldWALs, maxLogs=10 2024-12-08T05:50:24,731 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40185%2C1733637024511.1733637024730 2024-12-08T05:50:24,737 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 2024-12-08T05:50:24,744 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44447:44447),(127.0.0.1/127.0.0.1:36269:36269)] 2024-12-08T05:50:24,746 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:50:24,747 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:24,747 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,747 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:50:24,755 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:24,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:50:24,758 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:24,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:50:24,761 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:24,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:50:24,763 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:24,764 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,766 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,766 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,768 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,768 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,769 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:50:24,771 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:50:24,777 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:50:24,778 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871301, jitterRate=0.10791651904582977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:50:24,779 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637024747Initializing all the Stores at 1733637024749 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637024749Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637024752 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637024752Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637024752Cleaning up temporary data from old regions at 1733637024768 (+16 ms)Region opened successfully at 1733637024779 (+11 ms) 2024-12-08T05:50:24,780 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:50:24,785 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c829ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:50:24,786 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:50:24,786 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:50:24,786 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:50:24,787 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:50:24,787 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:50:24,788 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:50:24,788 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:50:24,790 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:50:24,791 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:50:24,792 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:50:24,793 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:50:24,793 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:50:24,796 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:50:24,796 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:50:24,797 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:50:24,800 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:50:24,801 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:50:24,802 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:50:24,804 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:50:24,805 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:50:24,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:24,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:50:24,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,813 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,40185,1733637024511, sessionid=0x101909f957d0000, setting cluster-up flag (Was=false) 2024-12-08T05:50:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,823 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:50:24,825 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:24,834 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:50:24,835 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,40185,1733637024511 2024-12-08T05:50:24,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:50:24,839 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:24,839 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:50:24,839 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:50:24,839 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,40185,1733637024511 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:50:24,841 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:50:24,846 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:24,846 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:50:24,847 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637054848 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:50:24,848 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:50:24,848 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,849 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:50:24,849 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:50:24,849 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:50:24,849 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:50:24,849 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:50:24,849 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637024849,5,FailOnTimeoutGroup] 2024-12-08T05:50:24,850 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637024849,5,FailOnTimeoutGroup] 2024-12-08T05:50:24,850 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,850 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:50:24,850 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,850 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T05:50:24,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:50:24,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:50:24,861 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:50:24,861 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10 2024-12-08T05:50:24,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:50:24,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:50:24,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:24,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:50:24,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:50:24,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:24,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:50:24,876 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:50:24,876 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:24,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:50:24,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:50:24,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:24,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:50:24,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:50:24,880 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:24,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:24,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:50:24,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740 2024-12-08T05:50:24,882 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740 2024-12-08T05:50:24,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:50:24,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:50:24,914 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(746): ClusterId : c839cd3f-d143-4b00-858a-b5cfd50be2e6 2024-12-08T05:50:24,914 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T05:50:24,914 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:50:24,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:50:24,917 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:50:24,917 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:50:24,918 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:50:24,919 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719725, jitterRate=-0.08482234179973602}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:50:24,920 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:50:24,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637024870Initializing all the Stores at 1733637024870Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637024870Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637024872 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637024872Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637024872Cleaning up temporary data from old regions at 1733637024914 (+42 ms)Region opened successfully at 1733637024920 (+6 ms) 2024-12-08T05:50:24,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:50:24,920 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:50:24,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:50:24,920 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:50:24,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:50:24,920 DEBUG [RS:0;0d942cb2025d:36989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45fb486e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:50:24,924 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:50:24,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637024920Disabling compacts and flushes for region at 1733637024920Disabling writes for close at 1733637024920Writing region close event to WAL at 1733637024924 (+4 ms)Closed at 1733637024924 2024-12-08T05:50:24,926 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:24,926 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:50:24,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:50:24,928 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:50:24,929 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:50:24,939 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:36989 2024-12-08T05:50:24,939 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:50:24,939 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:50:24,939 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T05:50:24,940 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,40185,1733637024511 with port=36989, startcode=1733637024575 2024-12-08T05:50:24,940 DEBUG [RS:0;0d942cb2025d:36989 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:50:24,943 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45141, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:50:24,943 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40185 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40185 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,946 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10 2024-12-08T05:50:24,946 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46561 2024-12-08T05:50:24,946 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:50:24,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:50:24,948 DEBUG [RS:0;0d942cb2025d:36989 {}] zookeeper.ZKUtil(111): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,948 WARN [RS:0;0d942cb2025d:36989 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:50:24,948 INFO [RS:0;0d942cb2025d:36989 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:24,948 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,949 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,36989,1733637024575] 2024-12-08T05:50:24,953 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:50:24,956 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:50:24,957 INFO [RS:0;0d942cb2025d:36989 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:50:24,957 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T05:50:24,957 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:50:24,958 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:50:24,958 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,958 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:24,959 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:24,960 DEBUG [RS:0;0d942cb2025d:36989 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:24,960 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T05:50:24,960 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,960 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,961 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,961 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,961 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36989,1733637024575-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:50:24,975 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:50:24,975 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36989,1733637024575-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,975 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,975 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.Replication(171): 0d942cb2025d,36989,1733637024575 started 2024-12-08T05:50:24,989 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:24,989 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,36989,1733637024575, RpcServer on 0d942cb2025d/172.17.0.2:36989, sessionid=0x101909f957d0001 2024-12-08T05:50:24,989 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:50:24,989 DEBUG [RS:0;0d942cb2025d:36989 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,989 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,36989,1733637024575' 2024-12-08T05:50:24,989 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:50:24,990 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:50:24,991 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:50:24,991 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:50:24,991 DEBUG [RS:0;0d942cb2025d:36989 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,36989,1733637024575 2024-12-08T05:50:24,991 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,36989,1733637024575' 2024-12-08T05:50:24,991 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:50:24,991 DEBUG 
[RS:0;0d942cb2025d:36989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:50:24,992 DEBUG [RS:0;0d942cb2025d:36989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:50:24,992 INFO [RS:0;0d942cb2025d:36989 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:50:24,992 INFO [RS:0;0d942cb2025d:36989 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:50:25,079 WARN [0d942cb2025d:40185 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T05:50:25,095 INFO [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C36989%2C1733637024575, suffix=, logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575, archiveDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs, maxLogs=32 2024-12-08T05:50:25,096 INFO [RS:0;0d942cb2025d:36989 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637025096 2024-12-08T05:50:25,108 INFO [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 2024-12-08T05:50:25,119 DEBUG [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44447:44447),(127.0.0.1/127.0.0.1:36269:36269)] 2024-12-08T05:50:25,330 DEBUG [0d942cb2025d:40185 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:50:25,330 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:25,332 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,36989,1733637024575, state=OPENING 2024-12-08T05:50:25,334 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:50:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:50:25,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:25,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:25,337 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:50:25,337 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,36989,1733637024575}] 2024-12-08T05:50:25,491 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:50:25,493 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:50:25,498 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:50:25,498 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:25,500 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C36989%2C1733637024575.meta, suffix=.meta, logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575, archiveDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs, maxLogs=32 2024-12-08T05:50:25,501 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta 2024-12-08T05:50:25,506 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta 2024-12-08T05:50:25,508 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36269:36269),(127.0.0.1/127.0.0.1:44447:44447)] 2024-12-08T05:50:25,508 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:50:25,509 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:50:25,509 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:50:25,509 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T05:50:25,509 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:50:25,509 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:25,510 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:50:25,510 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:50:25,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:50:25,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:50:25,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:25,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:25,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:50:25,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:50:25,515 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:25,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:25,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:50:25,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:50:25,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:25,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:50:25,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:50:25,518 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:50:25,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:25,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T05:50:25,518 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:50:25,519 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740 2024-12-08T05:50:25,521 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740 2024-12-08T05:50:25,522 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:50:25,522 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:50:25,523 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:50:25,524 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:50:25,525 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872630, jitterRate=0.10960742831230164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:50:25,526 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:50:25,526 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637025510Writing region info on filesystem at 1733637025510Initializing all the Stores at 1733637025511 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637025511Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637025512 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637025512Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637025512Cleaning up temporary data from old regions at 1733637025522 (+10 ms)Running coprocessor post-open hooks at 1733637025526 (+4 ms)Region opened successfully at 1733637025526 2024-12-08T05:50:25,527 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637025491 2024-12-08T05:50:25,531 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:50:25,531 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:50:25,532 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:25,533 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,36989,1733637024575, state=OPEN 2024-12-08T05:50:25,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:50:25,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:50:25,538 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:25,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:25,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:50:25,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:50:25,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,36989,1733637024575 in 201 msec 2024-12-08T05:50:25,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:50:25,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-12-08T05:50:25,546 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:50:25,546 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:50:25,548 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:50:25,548 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,36989,1733637024575, seqNum=-1] 2024-12-08T05:50:25,549 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:50:25,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37381, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:50:25,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 718 msec 2024-12-08T05:50:25,558 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637025558, completionTime=-1 2024-12-08T05:50:25,558 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:50:25,558 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637085561 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637145561 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,561 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,562 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:40185, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:50:25,562 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,562 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,564 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:50:25,566 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.944sec 2024-12-08T05:50:25,566 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:50:25,566 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:50:25,566 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:50:25,566 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:50:25,567 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:50:25,567 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:50:25,567 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:50:25,570 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:50:25,570 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:50:25,570 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40185,1733637024511-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
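
The master reports that quota support and the slow/large request log are disabled, so their backing system tables are never created in this run. Deployments that want them switch them on through configuration before the master starts; a hedged sketch of the relevant keys (the values shown are examples, not read from this test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterFeatureFlagsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Enables the quota manager the log above reports as "Quota support disabled".
    conf.setBoolean("hbase.quota.enabled", true);
    // Enables persistence of slow/large RPCs to hbase:slowlog, reported as disabled above.
    conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
    System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
  }
}
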
2024-12-08T05:50:25,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a423418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:25,613 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,40185,-1 for getting cluster id 2024-12-08T05:50:25,614 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:50:25,616 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c839cd3f-d143-4b00-858a-b5cfd50be2e6' 2024-12-08T05:50:25,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:50:25,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c839cd3f-d143-4b00-858a-b5cfd50be2e6" 2024-12-08T05:50:25,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e8856c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:25,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,40185,-1] 2024-12-08T05:50:25,617 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:50:25,618 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:50:25,619 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:50:25,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6be9d170, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:50:25,621 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:50:25,622 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,36989,1733637024575, seqNum=-1] 2024-12-08T05:50:25,623 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:50:25,625 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:50:25,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,40185,1733637024511 2024-12-08T05:50:25,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:25,630 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:50:25,646 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:50:25,646 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:50:25,647 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:50:25,647 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36443 2024-12-08T05:50:25,648 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36443 connecting to ZooKeeper ensemble=127.0.0.1:49789 2024-12-08T05:50:25,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-08T05:50:25,649 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:25,651 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:50:25,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364430x0, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:50:25,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:364430x0, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-08T05:50:25,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36443-0x101909f957d0002 connected 2024-12-08T05:50:25,657 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-08T05:50:25,658 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating 
BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:50:25,661 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:50:25,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:50:25,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:50:25,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36443 2024-12-08T05:50:25,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36443 2024-12-08T05:50:25,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36443 2024-12-08T05:50:25,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36443 2024-12-08T05:50:25,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36443 2024-12-08T05:50:25,671 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(746): ClusterId : c839cd3f-d143-4b00-858a-b5cfd50be2e6 2024-12-08T05:50:25,671 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:50:25,677 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:50:25,677 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:50:25,679 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:50:25,680 DEBUG [RS:1;0d942cb2025d:36443 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a144fdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:50:25,696 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;0d942cb2025d:36443 2024-12-08T05:50:25,696 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:50:25,696 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:50:25,696 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(832): About to register with Master. 
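
The 880 MB block cache allocated here, and the matching 880 M global memstore limit reported just below, are both derived as fractions of the JVM heap rather than fixed sizes. A sketch of the two heap-fraction keys involved (0.4 is the usual default for each; the exact values in this run are inferred, not quoted from its configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CacheSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of heap given to the on-heap block cache ("BlockCache size=880 MB" above).
    conf.setFloat("hfile.block.cache.size", 0.4f);
    // Fraction of heap allowed for all memstores ("globalMemStoreLimit=880 M" below).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    long heapBytes = Runtime.getRuntime().maxMemory();
    System.out.printf("approx block cache: %d MB%n",
        (long) (heapBytes * conf.getFloat("hfile.block.cache.size", 0.4f)) >> 20);
  }
}
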
2024-12-08T05:50:25,697 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,40185,1733637024511 with port=36443, startcode=1733637025646 2024-12-08T05:50:25,697 DEBUG [RS:1;0d942cb2025d:36443 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:50:25,699 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52569, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:50:25,699 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40185 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,700 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40185 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,701 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10 2024-12-08T05:50:25,701 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46561 2024-12-08T05:50:25,701 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:50:25,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:50:25,704 DEBUG [RS:1;0d942cb2025d:36443 {}] zookeeper.ZKUtil(111): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,704 WARN [RS:1;0d942cb2025d:36443 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:50:25,704 INFO [RS:1;0d942cb2025d:36443 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:50:25,705 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,36443,1733637025646] 2024-12-08T05:50:25,705 DEBUG [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,710 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:50:25,712 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:50:25,713 INFO [RS:1;0d942cb2025d:36443 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:50:25,713 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
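
The second region server instantiates an FSHLogProvider-backed WAL above. Which provider the WALFactory builds is controlled by hbase.wal.provider; a minimal sketch of selecting it explicitly (the value shown maps to FSHLogProvider, and the alternatives listed in the comment are the other commonly used identifiers; this is not configuration taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, as seen in the log above;
    // "asyncfs" and "multiwal" are other supported provider names.
    conf.set("hbase.wal.provider", "filesystem");
    System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
  }
}
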
2024-12-08T05:50:25,717 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:50:25,718 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:50:25,719 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:25,719 DEBUG [RS:1;0d942cb2025d:36443 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:50:25,720 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
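
Each RS_* executor service started above is a small fixed-size worker pool keyed by event type (open region, close region, log replay, and so on), with the core/max pool sizes printed in the log. As a rough analogy only, not HBase's own ExecutorService implementation, "corePoolSize=1, maxPoolSize=1" amounts to a single-threaded pool draining a queue of events:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSketch {
  public static void main(String[] args) {
    // Roughly what "corePoolSize=1, maxPoolSize=1" means for RS_OPEN_REGION:
    // one worker thread processing queued open-region events in order.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("would open a region here"));
    openRegionPool.shutdown();
  }
}
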
2024-12-08T05:50:25,720 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,720 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,720 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,720 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,721 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36443,1733637025646-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:50:25,740 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:50:25,740 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36443,1733637025646-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,740 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,740 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.Replication(171): 0d942cb2025d,36443,1733637025646 started 2024-12-08T05:50:25,756 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:50:25,756 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,36443,1733637025646, RpcServer on 0d942cb2025d/172.17.0.2:36443, sessionid=0x101909f957d0002 2024-12-08T05:50:25,756 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:50:25,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;0d942cb2025d:36443,5,FailOnTimeoutGroup] 2024-12-08T05:50:25,756 DEBUG [RS:1;0d942cb2025d:36443 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,756 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,36443,1733637025646' 2024-12-08T05:50:25,756 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:50:25,756 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-08T05:50:25,757 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:50:25,757 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
0d942cb2025d,36443,1733637025646 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,36443,1733637025646' 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:50:25,758 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:50:25,758 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,40185,1733637024511 2024-12-08T05:50:25,759 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@30890fb0 2024-12-08T05:50:25,759 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:50:25,759 DEBUG [RS:1;0d942cb2025d:36443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:50:25,759 INFO [RS:1;0d942cb2025d:36443 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:50:25,759 INFO [RS:1;0d942cb2025d:36443 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:50:25,761 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:50:25,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T05:50:25,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
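
The two TableDescriptorChecker warnings are expected in this context: the descriptor presumably carries a deliberately tiny region max file size and memstore flush size so that flushes and log rolls happen quickly during the test. A sketch of how a descriptor ends up with such values (the table name is illustrative; the numbers echo the warnings above):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyRegionDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("ExampleTinyRegions"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // 768 KB max file size triggers the "over splitting" warning seen above.
        .setMaxFileSize(786432L)
        // 8 KB flush size triggers the "very frequent flushing" warning seen above.
        .setMemStoreFlushSize(8192L)
        .build();
    System.out.println(td);
  }
}
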
2024-12-08T05:50:25,762 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:50:25,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T05:50:25,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:50:25,766 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:25,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-08T05:50:25,767 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:50:25,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:50:25,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:25,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:25,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741835_1011 (size=393) 2024-12-08T05:50:25,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741835_1011 (size=393) 2024-12-08T05:50:25,785 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a0d875ebeb3b6e27843ae87b1cb3e2f8, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10 2024-12-08T05:50:25,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40747 is added to blk_1073741836_1012 (size=76) 2024-12-08T05:50:25,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42141 is added to blk_1073741836_1012 (size=76) 2024-12-08T05:50:25,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:25,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a0d875ebeb3b6e27843ae87b1cb3e2f8, disabling compactions & flushes 2024-12-08T05:50:25,799 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:25,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:25,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. after waiting 0 ms 2024-12-08T05:50:25,800 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:25,800 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 
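
The master-side CreateTableProcedure (pid=4) driving this stretch is what a client-side createTable call turns into. A minimal client-side sketch of issuing the same kind of request and blocking until the procedure completes (connection setup is assumed; the single 'info' family mirrors the descriptor in the log):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous createTable: returns once the CreateTableProcedure finishes,
      // i.e. after the region is written to hbase:meta and assigned.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
    }
  }
}
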
2024-12-08T05:50:25,800 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: Waiting for close lock at 1733637025799Disabling compacts and flushes for region at 1733637025799Disabling writes for close at 1733637025799Writing region close event to WAL at 1733637025800 (+1 ms)Closed at 1733637025800 2024-12-08T05:50:25,801 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:50:25,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733637025802"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637025802"}]},"ts":"1733637025802"} 2024-12-08T05:50:25,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T05:50:25,807 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:50:25,807 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637025807"}]},"ts":"1733637025807"} 2024-12-08T05:50:25,809 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-08T05:50:25,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a0d875ebeb3b6e27843ae87b1cb3e2f8, ASSIGN}] 2024-12-08T05:50:25,811 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a0d875ebeb3b6e27843ae87b1cb3e2f8, ASSIGN 2024-12-08T05:50:25,812 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a0d875ebeb3b6e27843ae87b1cb3e2f8, ASSIGN; state=OFFLINE, location=0d942cb2025d,36989,1733637024575; forceNewPlan=false, retain=false 2024-12-08T05:50:25,862 INFO [RS:1;0d942cb2025d:36443 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C36443%2C1733637025646, suffix=, logDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646, archiveDir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs, maxLogs=32 2024-12-08T05:50:25,863 INFO [RS:1;0d942cb2025d:36443 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36443%2C1733637025646.1733637025862 2024-12-08T05:50:25,874 INFO [RS:1;0d942cb2025d:36443 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 2024-12-08T05:50:25,875 DEBUG [RS:1;0d942cb2025d:36443 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44447:44447),(127.0.0.1/127.0.0.1:36269:36269)] 2024-12-08T05:50:25,963 INFO [0d942cb2025d:40185 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-08T05:50:25,963 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a0d875ebeb3b6e27843ae87b1cb3e2f8, regionState=OPENING, regionLocation=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:25,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a0d875ebeb3b6e27843ae87b1cb3e2f8, ASSIGN because future has completed 2024-12-08T05:50:25,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0d875ebeb3b6e27843ae87b1cb3e2f8, server=0d942cb2025d,36989,1733637024575}] 2024-12-08T05:50:26,126 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:26,127 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a0d875ebeb3b6e27843ae87b1cb3e2f8, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:50:26,127 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,127 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:50:26,127 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,127 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,131 INFO [StoreOpener-a0d875ebeb3b6e27843ae87b1cb3e2f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,132 INFO [StoreOpener-a0d875ebeb3b6e27843ae87b1cb3e2f8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a0d875ebeb3b6e27843ae87b1cb3e2f8 columnFamilyName info 2024-12-08T05:50:26,132 DEBUG [StoreOpener-a0d875ebeb3b6e27843ae87b1cb3e2f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:50:26,133 INFO [StoreOpener-a0d875ebeb3b6e27843ae87b1cb3e2f8-1 {}] regionserver.HStore(327): Store=a0d875ebeb3b6e27843ae87b1cb3e2f8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:50:26,133 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,134 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,134 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,135 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,135 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,136 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,138 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:50:26,139 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a0d875ebeb3b6e27843ae87b1cb3e2f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728279, jitterRate=-0.07394552230834961}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:50:26,139 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:26,140 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: Running coprocessor pre-open hook at 1733637026127Writing region info on filesystem at 1733637026128 (+1 ms)Initializing all the Stores at 1733637026128Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637026129 (+1 ms)Cleaning up temporary data from old regions at 1733637026135 (+6 ms)Running coprocessor post-open hooks at 1733637026139 (+4 ms)Region opened successfully at 1733637026139 2024-12-08T05:50:26,141 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., pid=6, masterSystemTime=1733637026121 2024-12-08T05:50:26,145 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a0d875ebeb3b6e27843ae87b1cb3e2f8, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,36989,1733637024575 2024-12-08T05:50:26,146 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:26,146 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 
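
Once the OpenRegionProcedure and its parent TransitRegionStateProcedure finish, the new region is marked OPEN in hbase:meta and becomes locatable by clients. A small sketch of verifying that from the client side using the standard Admin/RegionLocator API (again assuming an existing connection to this cluster):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(tn)) {
      System.out.println("available: " + admin.isTableAvailable(tn));
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Should print the single region and the server it was assigned to above.
        System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
      }
    }
  }
}
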
2024-12-08T05:50:26,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0d875ebeb3b6e27843ae87b1cb3e2f8, server=0d942cb2025d,36989,1733637024575 because future has completed 2024-12-08T05:50:26,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:50:26,154 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a0d875ebeb3b6e27843ae87b1cb3e2f8, server=0d942cb2025d,36989,1733637024575 in 182 msec 2024-12-08T05:50:26,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:50:26,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a0d875ebeb3b6e27843ae87b1cb3e2f8, ASSIGN in 345 msec 2024-12-08T05:50:26,158 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:50:26,158 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637026158"}]},"ts":"1733637026158"} 2024-12-08T05:50:26,161 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-08T05:50:26,162 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:50:26,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 400 msec 2024-12-08T05:50:26,292 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:50:26,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:26,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:26,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:26,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:30,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:50:30,954 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-08T05:50:31,630 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:50:31,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:31,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:31,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:31,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:50:35,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T05:50:35,649 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-08T05:50:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40185 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:50:35,837 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-08T05:50:35,837 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-08T05:50:35,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T05:50:35,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:35,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:35,857 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:35,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:35,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:35,858 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:50:35,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43b67cb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:35,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@168c0f83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:35,975 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@431f0ae0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-37837-hadoop-hdfs-3_4_1-tests_jar-_-any-2499885953904196576/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:35,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a7d29fa{HTTP/1.1, (http/1.1)}{localhost:37837} 2024-12-08T05:50:35,975 INFO [Time-limited test {}] server.Server(415): Started @116159ms 2024-12-08T05:50:35,977 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:50:36,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:36,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:36,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:36,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:36,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:50:36,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53c8e058{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:36,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4eebb985{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:36,075 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,075 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,092 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:50:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81a93199f12db74d with lease ID 0x7206f96d451258b8: Processing first storage report for DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f from datanode DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81a93199f12db74d with lease ID 0x7206f96d451258b8: from storage DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f node DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,095 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81a93199f12db74d with lease ID 0x7206f96d451258b8: Processing first storage report for DS-228fe470-0f9e-4002-88d2-2b374c4da4b4 from datanode DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,095 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81a93199f12db74d with lease ID 0x7206f96d451258b8: from storage DS-228fe470-0f9e-4002-88d2-2b374c4da4b4 node DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,138 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29d1c6fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-43601-hadoop-hdfs-3_4_1-tests_jar-_-any-10723302062992301366/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:36,138 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31a3e3f4{HTTP/1.1, (http/1.1)}{localhost:43601} 2024-12-08T05:50:36,138 INFO [Time-limited test {}] server.Server(415): Started @116322ms 2024-12-08T05:50:36,140 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:50:36,175 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:36,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:36,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:36,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:36,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:36,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6aeddc33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:36,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61ab06b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:36,242 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data7/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,242 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data8/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,265 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:50:36,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1286b40a830ecef8 with lease ID 0x7206f96d451258b9: Processing first storage report for DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7 from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=302c412d-fb86-4603-b346-54eeb740f071, infoPort=39209, infoSecurePort=0, ipcPort=42011, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1286b40a830ecef8 with lease ID 0x7206f96d451258b9: from storage DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7 node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=302c412d-fb86-4603-b346-54eeb740f071, infoPort=39209, infoSecurePort=0, ipcPort=42011, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1286b40a830ecef8 with lease ID 0x7206f96d451258b9: Processing first storage report for DS-a92bffe1-f5a1-4ffb-9068-7572e65139ce from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=302c412d-fb86-4603-b346-54eeb740f071, infoPort=39209, infoSecurePort=0, ipcPort=42011, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1286b40a830ecef8 with lease ID 0x7206f96d451258b9: from storage DS-a92bffe1-f5a1-4ffb-9068-7572e65139ce node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=302c412d-fb86-4603-b346-54eeb740f071, infoPort=39209, infoSecurePort=0, ipcPort=42011, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,298 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6788d34a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-40629-hadoop-hdfs-3_4_1-tests_jar-_-any-13242815579158712896/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:36,298 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66d367d8{HTTP/1.1, (http/1.1)}{localhost:40629} 2024-12-08T05:50:36,298 INFO [Time-limited test {}] server.Server(415): Started @116482ms 2024-12-08T05:50:36,299 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
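The two warnings that keep recurring above are benign in this mini-cluster run: AuthenticationFilter falls back to a random HTTP-signing secret because /home/jenkins/hadoop-http-auth-signature-secret is not readable, and the DataNode DirectoryScanner disables its throttle (-1) because dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above the accepted 1000 ms/sec ceiling. A minimal sketch, assuming a test-owned Hadoop Configuration (the class name, secret-file path, and the 500 ms/sec value are illustrative, not taken from this run), of settings that would avoid both warnings:

    import org.apache.hadoop.conf.Configuration;

    final class QuietWarningsConfSketch {
      static Configuration testConf() {
        Configuration conf = new Configuration();
        // Point the HTTP AuthenticationFilter at a readable secret file so it does not
        // fall back to random per-process secrets.
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/tmp/http-auth-signature-secret");
        // Keep the directory-scanner throttle at or below 1000 ms/sec; larger values are
        // rejected and throttling is turned off (-1), as the warning above reports.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
        return conf;
      }
    }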
2024-12-08T05:50:36,392 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data10/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,392 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data9/current/BP-1312343861-172.17.0.2-1733637023632/current, will proceed with Du for space computation calculation, 2024-12-08T05:50:36,409 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:50:36,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe63084f13de85715 with lease ID 0x7206f96d451258ba: Processing first storage report for DS-04b517c0-4e1b-48ef-adae-ca58306a99bb from datanode DatanodeRegistration(127.0.0.1:35443, datanodeUuid=b6350157-5b5c-4229-bb0e-1d4f5f672b97, infoPort=37355, infoSecurePort=0, ipcPort=44739, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe63084f13de85715 with lease ID 0x7206f96d451258ba: from storage DS-04b517c0-4e1b-48ef-adae-ca58306a99bb node DatanodeRegistration(127.0.0.1:35443, datanodeUuid=b6350157-5b5c-4229-bb0e-1d4f5f672b97, infoPort=37355, infoSecurePort=0, ipcPort=44739, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe63084f13de85715 with lease ID 0x7206f96d451258ba: Processing first storage report for DS-d4a5c0a8-3317-4ad7-a5b2-8febabc524b3 from datanode DatanodeRegistration(127.0.0.1:35443, datanodeUuid=b6350157-5b5c-4229-bb0e-1d4f5f672b97, infoPort=37355, infoSecurePort=0, ipcPort=44739, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632) 2024-12-08T05:50:36,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe63084f13de85715 with lease ID 0x7206f96d451258ba: from storage DS-d4a5c0a8-3317-4ad7-a5b2-8febabc524b3 node DatanodeRegistration(127.0.0.1:35443, datanodeUuid=b6350157-5b5c-4229-bb0e-1d4f5f672b97, infoPort=37355, infoSecurePort=0, ipcPort=44739, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:36,418 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,418 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,418 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,418 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,419 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:36,419 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 
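The Error Recovery entries above show the DFS client dropping datanode 127.0.0.1:40747 from each write pipeline and carrying on with the surviving replica. Whether the client then asks for a replacement datanode, keeps writing with fewer replicas, or aborts is governed by the replace-datanode-on-failure client settings; a minimal sketch, assuming they are applied to the client Configuration before the writes (the values shown are illustrative, not necessarily what this run used):

    import org.apache.hadoop.conf.Configuration;

    final class PipelineRecoveryConfSketch {
      static void apply(Configuration conf) {
        // Try to add a replacement datanode when a pipeline node fails mid-write.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement is available, keep writing with the remaining nodes instead of
        // failing the stream with "All datanodes ... are bad. Aborting...".
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      }
    }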
2024-12-08T05:50:36,419 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta block BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:36,419 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:36,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:39940 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39940 dst: /127.0.0.1:40747 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479256288_22 at /127.0.0.1:39970 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39970 dst: /127.0.0.1:40747 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-375756593_22 at /127.0.0.1:39910 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39910 dst: /127.0.0.1:40747 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479256288_22 at /127.0.0.1:38974 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42141:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38974 dst: /127.0.0.1:42141 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:38948 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42141:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38948 dst: /127.0.0.1:42141 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-375756593_22 at /127.0.0.1:38908 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42141:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38908 dst: /127.0.0.1:42141 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:36,420 WARN [PacketResponder: BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40747] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2053fa2a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:36,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:38950 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42141:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38950 dst: /127.0.0.1:42141 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:36,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:39946 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39946 dst: /127.0.0.1:40747 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
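The teardown entries that follow (stopped Jetty contexts, "Ending block pool service", interrupted refreshUsed threads) are the orderly shutdown of a DataNode that the test stops deliberately: testLogRollOnDatanodeDeath evidently stops datanodes in the active WAL pipeline to provoke the DataXceiver and DataStreamer errors logged above. A minimal sketch of that step, assuming a JUnit test holding a started MiniDFSCluster (the class, variable names, and datanode index are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DatanodeDeathSketch {
      // Stop one datanode to simulate its death; the returned properties allow a restart later.
      static MiniDFSCluster.DataNodeProperties killOne(MiniDFSCluster cluster) {
        return cluster.stopDataNode(0); // index of the datanode to stop; illustrative
      }

      static void revive(MiniDFSCluster cluster, MiniDFSCluster.DataNodeProperties dn)
          throws IOException {
        cluster.restartDataNode(dn, true); // 'true' asks to keep the same ports where possible
      }
    }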
2024-12-08T05:50:36,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b8a0b06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:36,423 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:36,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@82e7b75{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:36,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ed0b53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:36,424 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:36,424 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid efd53946-5941-41e2-9b0b-bda82ebd7dd4) service to localhost/127.0.0.1:46561 2024-12-08T05:50:36,425 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:50:36,425 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:36,425 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data3/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:36,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data4/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:36,426 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:36,427 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta block BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,427 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,427 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,428 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a12d6d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:36,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@441f58be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:36,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d9d8e2c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:36,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4475d850{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:36,432 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:36,432 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid 4ee35e7b-40eb-46c8-a64d-d562f2b8508e) service to localhost/127.0.0.1:46561 2024-12-08T05:50:36,432 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): 
Command processor encountered interrupt and exit. 2024-12-08T05:50:36,432 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data1/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:36,432 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:36,432 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data2/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:36,433 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:36,436 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., hostname=0d942cb2025d,36989,1733637024575, seqNum=2] 2024-12-08T05:50:36,438 ERROR [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10-prefix:0d942cb2025d,36989,1733637024575 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,438 WARN [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10-prefix:0d942cb2025d,36989,1733637024575 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
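Once appendAndSync fails with "All datanodes ... are bad. Aborting...", the region server's log roller (next entries) requests a roll so new edits go to a fresh WAL file written on a healthy pipeline; the test then reads the new file name back via log.getCurrentFileName(), as seen a few entries below. The same roll can also be requested explicitly through the Admin API; a minimal sketch, assuming an open HBase client Connection (the server name, port, and start code are copied from the log entries, and this explicit call is not something the automatic roller needs):

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class WalRollSketch {
      // Ask one region server to close its current WAL and start a new one.
      static void rollWal(Connection connection) throws IOException {
        ServerName rs = ServerName.valueOf("0d942cb2025d", 36989, 1733637024575L);
        try (Admin admin = connection.getAdmin()) {
          admin.rollWALWriter(rs);
        }
      }
    }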
2024-12-08T05:50:36,438 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,438 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36989%2C1733637024575:(num 1733637025096) roll requested 2024-12-08T05:50:36,438 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637036438 2024-12-08T05:50:36,444 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:36,444 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:36,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:36,444 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:36,444 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:36,445 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 2024-12-08T05:50:36,445 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,445 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:36,445 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39209:39209),(127.0.0.1/127.0.0.1:37355:37355)] 2024-12-08T05:50:36,445 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:36,446 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-08T05:50:36,446 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-08T05:50:36,446 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 2024-12-08T05:50:36,449 WARN [IPC Server handler 0 on default port 46561 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-08T05:50:36,452 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 after 4ms 2024-12-08T05:50:36,695 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:37,720 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:38,446 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:38,447 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 2024-12-08T05:50:38,448 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:38,448 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:38,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:33302 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33302 dst: /127.0.0.1:40163 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:38,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:46702 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46702 dst: /127.0.0.1:35443 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
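The "Recover lease on dfs file ... Lease recovery is in progress. RecoveryId = 1019" and "Failed to recover lease, attempt=0" entries above come from RecoverLeaseFSUtils closing the abandoned WAL: it keeps re-issuing recoverLease against the NameNode until the file is reported closed. A minimal sketch of that retry pattern, assuming a DistributedFileSystem handle and the path of the old WAL (the real utility wraps this loop with timeouts and logging, and the 1-second pause is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Re-issue recoverLease until the NameNode reports the old WAL file closed.
      static void recover(DistributedFileSystem dfs, Path oldWal)
          throws IOException, InterruptedException {
        while (!dfs.recoverLease(oldWal)) { // true once the last block is recovered and the file is closed
          Thread.sleep(1000);               // illustrative pause between attempts
        }
      }
    }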
2024-12-08T05:50:38,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29d1c6fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:38,453 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31a3e3f4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:38,453 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:38,453 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4eebb985{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:38,453 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53c8e058{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:38,455 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:38,455 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid 302c412d-fb86-4603-b346-54eeb740f071) service to localhost/127.0.0.1:46561 2024-12-08T05:50:38,455 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:50:38,455 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:38,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data7/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:38,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data8/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:38,456 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:38,695 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:39,721 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:40,446 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]] 2024-12-08T05:50:40,447 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:40,447 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36989%2C1733637024575:(num 1733637036438) roll requested 2024-12-08T05:50:40,447 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637040447 2024-12-08T05:50:40,452 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42141 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:40,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:46716 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data10]'}, localName='127.0.0.1:35443', datanodeUuid='b6350157-5b5c-4229-bb0e-1d4f5f672b97', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021 to mirror 127.0.0.1:42141 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:40,452 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:40,452 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021 2024-12-08T05:50:40,452 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:46716 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T05:50:40,452 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:46716 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46716 dst: /127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:40,453 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 after 4007ms 2024-12-08T05:50:40,455 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:40,458 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:40,458 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 
2024-12-08T05:50:40,458 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741840_1022 2024-12-08T05:50:40,459 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:40,462 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:50:40,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:40,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:40,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:40,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:40,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:40,475 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637040447 2024-12-08T05:50:40,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35443 is added to blk_1073741838_1020 (size=3600) 2024-12-08T05:50:40,493 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929),(127.0.0.1/127.0.0.1:37355:37355)] 2024-12-08T05:50:40,493 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:40,493 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 is not closed yet, will try archiving it next time 2024-12-08T05:50:40,696 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:40,879 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:41,721 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,425 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a12b8ba[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35443, datanodeUuid=b6350157-5b5c-4229-bb0e-1d4f5f672b97, infoPort=37355, infoSecurePort=0, ipcPort=44739, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741838_1020 to 127.0.0.1:40163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,470 WARN [ResponseProcessor for block BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:42,470 WARN [DataStreamer for file /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637040447 block BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:42,470 WARN [PacketResponder: BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35443] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:56586 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56586 dst: /127.0.0.1:39795 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:46722 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46722 dst: /127.0.0.1:35443 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:42,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6788d34a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:42,472 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66d367d8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:50:42,472 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:50:42,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61ab06b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:50:42,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6aeddc33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:50:42,474 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:50:42,474 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:50:42,474 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid b6350157-5b5c-4229-bb0e-1d4f5f672b97) service to localhost/127.0.0.1:46561 2024-12-08T05:50:42,474 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:50:42,475 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data9/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:42,475 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data10/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:50:42,475 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:50:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36989 {}] regionserver.HRegion(8855): Flush requested on a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:42,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:50:42,493 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]] 2024-12-08T05:50:42,493 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,493 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36989%2C1733637024575:(num 1733637040447) roll requested 2024-12-08T05:50:42,494 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637042493 2024-12-08T05:50:42,497 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,497 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:42,497 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741842_1025 2024-12-08T05:50:42,497 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:42,499 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,499 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:42,499 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741843_1026 2024-12-08T05:50:42,499 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:42,501 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,501 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 
2024-12-08T05:50:42,501 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741844_1027 2024-12-08T05:50:42,501 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:42,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/6de45c20b1c24069bc6bfb6c49fca343 is 1080, key is row0002/info:/1733637038458/Put/seqid=0 2024-12-08T05:50:42,504 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40747 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60538 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028 to mirror 127.0.0.1:40747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:42,504 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:42,504 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028 2024-12-08T05:50:42,504 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60538 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T05:50:42,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60538 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60538 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,505 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:42,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60546 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,505 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,505 WARN [IPC Server handler 0 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:42,505 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60546 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:42,505 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 
2024-12-08T05:50:42,505 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029 2024-12-08T05:50:42,505 WARN [IPC Server handler 0 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:42,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60546 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60546 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,505 WARN [IPC Server handler 0 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:42,506 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:42,507 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,507 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:42,507 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741848_1031 2024-12-08T05:50:42,508 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:42,508 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:42,508 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:42,508 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:42,508 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:42,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:42,509 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637040447 with entries=12, filesize=12.96 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637042493 2024-12-08T05:50:42,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60558 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032 to mirror 127.0.0.1:40747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,509 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40747 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,510 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60558 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:42,510 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:42,510 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032 2024-12-08T05:50:42,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60558 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60558 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:42,510 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741841_1024 (size=13274) 2024-12-08T05:50:42,513 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:42,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60570 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033 to mirror 127.0.0.1:40163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:42,513 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 
2024-12-08T05:50:42,513 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033 2024-12-08T05:50:42,513 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60570 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:42,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60570 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60570 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:42,514 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:42,514 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929)] 2024-12-08T05:50:42,514 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:42,514 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637040447 is not closed yet, will try archiving it next time 2024-12-08T05:50:42,514 WARN [IPC Server handler 2 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:42,515 WARN [IPC Server handler 2 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:42,515 WARN [IPC Server handler 2 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:42,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741851_1034 (size=10347) 2024-12-08T05:50:42,696 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:42,911 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:42,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/6de45c20b1c24069bc6bfb6c49fca343 2024-12-08T05:50:42,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/6de45c20b1c24069bc6bfb6c49fca343 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343 2024-12-08T05:50:42,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343, entries=5, sequenceid=11, filesize=10.1 K 2024-12-08T05:50:42,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 448ms, sequenceid=11, compaction requested=false 2024-12-08T05:50:42,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:50:43,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36989 {}] regionserver.HRegion(8855): Flush requested on a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:43,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-08T05:50:43,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/04e0134a2aa44006a873e9e9756badd4 is 1080, key is row0007/info:/1733637042484/Put/seqid=0 2024-12-08T05:50:43,110 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:43,110 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:43,110 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741852_1035 2024-12-08T05:50:43,110 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:43,111 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:43,112 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:43,112 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741853_1036 2024-12-08T05:50:43,112 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:43,114 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:43,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60604 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037 to mirror 127.0.0.1:40163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:43,114 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:43,114 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037 2024-12-08T05:50:43,114 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60604 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:43,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60604 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60604 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:43,115 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:43,117 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:43,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60618 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:43,117 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:43,117 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038 2024-12-08T05:50:43,117 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60618 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:43,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60618 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60618 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:43,117 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:43,118 WARN [IPC Server handler 1 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:43,118 WARN [IPC Server handler 1 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:43,118 WARN [IPC Server handler 1 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:43,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741856_1039 (size=12506) 2024-12-08T05:50:43,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/04e0134a2aa44006a873e9e9756badd4 2024-12-08T05:50:43,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/04e0134a2aa44006a873e9e9756badd4 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4 2024-12-08T05:50:43,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4, entries=7, sequenceid=24, filesize=12.2 K 2024-12-08T05:50:43,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 431ms, sequenceid=24, compaction requested=false 2024-12-08T05:50:43,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:50:43,535 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-08T05:50:43,535 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:43,535 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4 because midkey is the same as first or last row 2024-12-08T05:50:43,721 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,514 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]] 2024-12-08T05:50:44,515 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,515 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36989%2C1733637024575:(num 1733637042493) roll requested 2024-12-08T05:50:44,515 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637044515 2024-12-08T05:50:44,518 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,518 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:44,518 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741857_1040 2024-12-08T05:50:44,518 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:44,519 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,520 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 
2024-12-08T05:50:44,520 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741858_1041 2024-12-08T05:50:44,520 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:44,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36989 {}] regionserver.HRegion(8855): Flush requested on a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:44,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T05:50:44,522 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40747 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60630 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042 to mirror 127.0.0.1:40747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:44,522 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:44,522 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042 2024-12-08T05:50:44,522 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60630 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T05:50:44,523 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60630 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60630 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:44,523 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:44,524 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:44,524 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:44,524 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741860_1043 2024-12-08T05:50:44,525 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:44,525 WARN [IPC Server handler 4 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:44,526 WARN [IPC Server handler 4 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:44,526 WARN [IPC Server handler 4 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:44,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/b5fcd51534d944238112dfdaa89730c4 is 1079, key is tmprow/info:/1733637044520/Put/seqid=0 2024-12-08T05:50:44,527 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,527 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:44,528 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741862_1045 2024-12-08T05:50:44,528 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:44,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:44,531 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:44,531 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60646 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:44,532 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:44,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:44,532 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046 2024-12-08T05:50:44,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:44,532 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60646 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:44,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:44,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60646 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60646 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:44,532 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637042493 with entries=14, filesize=12.82 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637044515 2024-12-08T05:50:44,532 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:44,533 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741847_1030 (size=13133) 2024-12-08T05:50:44,533 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 
2024-12-08T05:50:44,533 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741864_1047 2024-12-08T05:50:44,534 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:44,536 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929)] 2024-12-08T05:50:44,536 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:44,536 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637042493 is not closed yet, will try archiving it next time 2024-12-08T05:50:44,540 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637036438 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs/0d942cb2025d%2C36989%2C1733637024575.1733637036438 2024-12-08T05:50:44,541 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40747 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60658 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048 to mirror 127.0.0.1:40747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:44,541 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:44,541 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048 2024-12-08T05:50:44,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60658 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:44,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60658 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60658 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:44,542 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:44,542 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637040447 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs/0d942cb2025d%2C36989%2C1733637024575.1733637040447 2024-12-08T05:50:44,542 WARN [IPC Server handler 4 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:44,542 WARN [IPC Server handler 4 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:44,542 WARN [IPC Server handler 4 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:44,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741866_1049 (size=6027) 2024-12-08T05:50:44,697 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:44,935 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 is not closed yet, will try archiving it next time 2024-12-08T05:50:44,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/b5fcd51534d944238112dfdaa89730c4 2024-12-08T05:50:44,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/b5fcd51534d944238112dfdaa89730c4 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4 2024-12-08T05:50:44,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4, entries=1, sequenceid=34, filesize=5.9 K 2024-12-08T05:50:44,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 441ms, sequenceid=34, compaction requested=true 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4 because midkey is the same as first or last row 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a0d875ebeb3b6e27843ae87b1cb3e2f8:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:50:44,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:50:44,963 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:50:44,965 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:50:44,965 DEBUG 
[RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1541): a0d875ebeb3b6e27843ae87b1cb3e2f8/info is initiating minor compaction (all files) 2024-12-08T05:50:44,965 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a0d875ebeb3b6e27843ae87b1cb3e2f8/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:50:44,965 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4] into tmpdir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp, totalSize=28.2 K 2024-12-08T05:50:44,966 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6de45c20b1c24069bc6bfb6c49fca343, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733637038458 2024-12-08T05:50:44,966 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04e0134a2aa44006a873e9e9756badd4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733637042484 2024-12-08T05:50:44,967 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5fcd51534d944238112dfdaa89730c4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733637044520 2024-12-08T05:50:44,984 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a0d875ebeb3b6e27843ae87b1cb3e2f8#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:50:44,984 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/14a8f2602b0f455284d17e599088f41e is 1080, key is row0002/info:/1733637038458/Put/seqid=0 2024-12-08T05:50:44,986 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,986 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:44,986 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741867_1050 2024-12-08T05:50:44,987 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:44,988 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,988 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:44,988 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741868_1051 2024-12-08T05:50:44,989 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:44,990 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,990 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:44,990 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741869_1052 2024-12-08T05:50:44,990 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:44,992 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40747 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:44,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60688 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053 to mirror 127.0.0.1:40747 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:44,993 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:44,993 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053 2024-12-08T05:50:44,993 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60688 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:44,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60688 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60688 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:44,995 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:44,996 WARN [IPC Server handler 0 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:44,996 WARN [IPC Server handler 0 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:44,996 WARN [IPC Server handler 0 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:45,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741871_1054 (size=17994) 2024-12-08T05:50:45,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2511e850[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741841_1024 to 127.0.0.1:42141 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:50:45,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bd61282[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741851_1034 to 127.0.0.1:40163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:45,412 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/14a8f2602b0f455284d17e599088f41e as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e 2024-12-08T05:50:45,420 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a0d875ebeb3b6e27843ae87b1cb3e2f8/info of a0d875ebeb3b6e27843ae87b1cb3e2f8 into 14a8f2602b0f455284d17e599088f41e(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
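The compaction above collapses the three flushed files into a single ~17.6 K store file, and the split-policy checks logged just before and after it then run against that file: the size test passes (sumSize 17.6 K or 28.2 K against sizeToCheck 16.0 K), but the store refuses to split because the midkey of its largest file equals the first or last row. A rough Java sketch of that decision, with the row values partly hypothetical (row0002 appears in the log, the last row does not), not the actual HBase split-policy classes:

import java.util.Arrays;

public class SplitDecisionSketch {
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes, byte[] midKey,
                               byte[] firstKey, byte[] lastKey) {
        if (sumSizeBytes <= sizeToCheckBytes) {
            return false;                       // region not big enough yet
        }
        // a midkey equal to the first or last row would create an empty daughter region,
        // so the split point is rejected ("cannot split ... midkey is the same as first or last row")
        return !(Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey));
    }

    public static void main(String[] args) {
        long sumSize = 17 * 1024 + 600;   // ~17.6 K, the compacted store file above
        long sizeToCheck = 16 * 1024;     // 16.0 K threshold reported by the policy
        byte[] first = "row0002".getBytes();
        byte[] last  = "row0019".getBytes();   // hypothetical last row
        byte[] mid   = "row0002".getBytes();   // midkey == first row, so no split
        System.out.println("split? " + shouldSplit(sumSize, sizeToCheck, mid, first, last));
    }
}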
2024-12-08T05:50:45,420 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:50:45,420 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., storeName=a0d875ebeb3b6e27843ae87b1cb3e2f8/info, priority=13, startTime=1733637044963; duration=0sec 2024-12-08T05:50:45,420 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T05:50:45,420 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e because midkey is the same as first or last row 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e because midkey is the same as first or last row 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e because midkey is the same as first or last row 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:50:45,421 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a0d875ebeb3b6e27843ae87b1cb3e2f8:info 2024-12-08T05:50:45,722 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:45,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36989 {}] regionserver.HRegion(8855): Flush requested on a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:50:45,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T05:50:45,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/e3306c858d50453db89b88e303420cdb is 1079, key is tmprow/info:/1733637045939/Put/seqid=0 2024-12-08T05:50:45,947 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:45,947 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:45,947 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741872_1055 2024-12-08T05:50:45,948 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:45,949 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:45,949 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK], DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]) is bad. 2024-12-08T05:50:45,949 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741873_1056 2024-12-08T05:50:45,949 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK] 2024-12-08T05:50:45,950 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:45,951 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]) is bad. 2024-12-08T05:50:45,951 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741874_1057 2024-12-08T05:50:45,951 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40747,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK] 2024-12-08T05:50:45,953 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42141 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:45,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60708 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058 to mirror 127.0.0.1:42141 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:45,953 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:45,953 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058 2024-12-08T05:50:45,953 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60708 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:50:45,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:60708 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60708 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:45,954 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:45,955 WARN [IPC Server handler 2 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-08T05:50:45,955 WARN [IPC Server handler 2 on default port 46561 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-08T05:50:45,955 WARN [IPC Server handler 2 on default port 46561 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-08T05:50:45,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741876_1059 (size=6027) 2024-12-08T05:50:46,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2511e850[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741847_1030 to 127.0.0.1:42141 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:46,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bd61282[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741856_1039 to 127.0.0.1:40163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
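The repeated "Failed to transfer blk_... to 127.0.0.1:42141/40163/35443 got java.net.ConnectException: Connection refused" entries are the surviving datanode (127.0.0.1:39795) trying to re-replicate under-replicated blocks to peers the test has already stopped. A plain-socket probe reproduces the same failure mode; the addresses below are taken from this log and should only ever be poked on a throwaway test cluster:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class DatanodeProbe {
    static void probe(String host, int port) {
        try (Socket s = new Socket()) {
            s.connect(new InetSocketAddress(host, port), 2000);   // 2-second connect timeout
            System.out.println(host + ":" + port + " reachable");
        } catch (IOException e) {
            // same root cause as the NetUtils.connect failures in the stack traces above
            System.out.println(host + ":" + port + " unreachable: " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        probe("127.0.0.1", 42141);   // stopped datanode, expect Connection refused
        probe("127.0.0.1", 39795);   // surviving datanode
    }
}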
2024-12-08T05:50:46,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/e3306c858d50453db89b88e303420cdb 2024-12-08T05:50:46,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/e3306c858d50453db89b88e303420cdb as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb 2024-12-08T05:50:46,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb, entries=1, sequenceid=45, filesize=5.9 K 2024-12-08T05:50:46,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 433ms, sequenceid=45, compaction requested=false 2024-12-08T05:50:46,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:50:46,374 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-08T05:50:46,374 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:50:46,374 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e because midkey is the same as first or last row 2024-12-08T05:50:46,541 WARN [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-08T05:50:46,541 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:46,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:50:46,565 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:50:46,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:50:46,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:50:46,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:50:46,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ed2ea19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:50:46,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a864bf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:50:46,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@507ee43d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/java.io.tmpdir/jetty-localhost-36663-hadoop-hdfs-3_4_1-tests_jar-_-any-13862051428397822279/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:50:46,690 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5319e63c{HTTP/1.1, (http/1.1)}{localhost:36663} 2024-12-08T05:50:46,690 INFO [Time-limited test {}] server.Server(415): Started @126874ms 2024-12-08T05:50:46,692 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:50:46,697 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:46,804 WARN [Thread-980 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:50:46,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe5ccefa37e7a90d with lease ID 0x7206f96d451258bb: from storage DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93 node DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:50:46,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe5ccefa37e7a90d with lease ID 0x7206f96d451258bb: from storage DS-aafb9aab-3773-41c8-8d30-a00f0165e9d6 node DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:50:47,722 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:48,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2511e850[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741871_1054 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
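The Jetty/datanode startup at 05:50:46 and the block report from 127.0.0.1:37825 carrying storage DS-3ebf26fc (previously attached to 127.0.0.1:40747) suggest the test has brought one of the killed datanodes back up on a new port. A hedged sketch of how a datanode-death test typically drives that, assuming the MiniDFSCluster test API (stopDataNode/restartDataNode); this is not copied from this test's source and would run inside a test that already created the cluster:

import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
    static void killAndRevive(MiniDFSCluster cluster, int dnIndex) throws Exception {
        // stop the datanode: pipelines through it start failing with Connection refused
        MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(dnIndex);
        Thread.sleep(5_000);                 // let WAL writes and rolls hit the dead node
        cluster.restartDataNode(dnProps);    // revive it on the same storage dirs (port may change)
        cluster.waitActive();                // wait until it re-registers and reports its blocks
    }
}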
2024-12-08T05:50:48,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bd61282[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741866_1049 to 127.0.0.1:40163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:48,541 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:48,697 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:49,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741876_1059 (size=6027) 2024-12-08T05:50:49,723 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:50,541 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:50,698 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:51,723 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:52,542 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:52,698 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:53,723 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,491 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T05:50:54,542 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,698 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,850 ERROR [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData-prefix:0d942cb2025d,40185,1733637024511 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,850 WARN [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData-prefix:0d942cb2025d,40185,1733637024511 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,850 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C40185%2C1733637024511:(num 1733637024730) roll requested 2024-12-08T05:50:54,851 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40185%2C1733637024511.1733637054850 2024-12-08T05:50:54,855 WARN [Thread-1001 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:54,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-375756593_22 at /127.0.0.1:41542 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data4]'}, localName='127.0.0.1:37825', datanodeUuid='efd53946-5941-41e2-9b0b-bda82ebd7dd4', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:54,855 WARN [Thread-1001 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37825,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:50:54,855 WARN [Thread-1001 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060 2024-12-08T05:50:54,855 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-375756593_22 at /127.0.0.1:41542 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-08T05:50:54,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-375756593_22 at /127.0.0.1:41542 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:37825:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41542 dst: /127.0.0.1:37825 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:54,856 WARN [Thread-1001 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:50:54,857 WARN [Thread-1001 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,857 WARN [Thread-1001 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK], DatanodeInfoWithStorage[127.0.0.1:40163,DS-c48f66e4-e0f6-4cef-a267-f504c8616ab7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]) is bad. 2024-12-08T05:50:54,857 WARN [Thread-1001 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741878_1061 2024-12-08T05:50:54,857 WARN [Thread-1001 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK] 2024-12-08T05:50:54,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:54,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:54,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:54,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:54,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:50:54,862 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637054850 2024-12-08T05:50:54,862 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,863 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:54,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 2024-12-08T05:50:54,863 WARN [IPC Server handler 1 on default port 46561 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 has not been closed. Lease recovery is in progress. RecoveryId = 1063 for block blk_1073741830_1006 2024-12-08T05:50:54,863 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929),(127.0.0.1/127.0.0.1:36455:36455)] 2024-12-08T05:50:54,863 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 is not closed yet, will try archiving it next time 2024-12-08T05:50:54,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 after 0ms 2024-12-08T05:50:55,724 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:56,542 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:56,828 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@208c1f96 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:42141,null,null]) java.net.ConnectException: Call From 0d942cb2025d/172.17.0.2 to localhost:40781 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T05:50:56,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741833_1019 (size=455) 2024-12-08T05:50:57,468 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637025096 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs/0d942cb2025d%2C36989%2C1733637024575.1733637025096 2024-12-08T05:50:57,470 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637042493 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs/0d942cb2025d%2C36989%2C1733637024575.1733637042493 2024-12-08T05:50:57,724 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:50:57,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11e6dc08[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741833_1019 to 127.0.0.1:42141 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:50:58,543 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:50:58,865 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/WALs/0d942cb2025d,40185,1733637024511/0d942cb2025d%2C40185%2C1733637024511.1733637024730 after 4002ms 2024-12-08T05:50:59,724 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:00,543 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:00,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741835_1011 (size=393) 2024-12-08T05:51:00,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:01,725 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:01,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11e6dc08[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741827_1003 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:01,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a3e0d9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741829_1005 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:02,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.1733637062450 2024-12-08T05:51:02,453 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,453 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:37825,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 
2024-12-08T05:51:02,453 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741880_1064 2024-12-08T05:51:02,454 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,458 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,459 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637044515 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637062450 2024-12-08T05:51:02,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929),(127.0.0.1/127.0.0.1:36455:36455)] 2024-12-08T05:51:02,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637044515 is not closed yet, will try archiving it next time 2024-12-08T05:51:02,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741861_1044 (size=12100) 2024-12-08T05:51:02,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36989 {}] regionserver.HRegion(8855): Flush requested on a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:51:02,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-08T05:51:02,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/7153277598eb4fa9b502c46535c00155 is 1080, key is row0013/info:/1733637062462/Put/seqid=0 2024-12-08T05:51:02,480 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:02,480 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37054 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:02,481 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:51:02,481 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066 2024-12-08T05:51:02,481 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37054 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:51:02,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37054 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741882_1066] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37054 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:02,481 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741883_1067 (size=11421) 2024-12-08T05:51:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741883_1067 (size=11421) 2024-12-08T05:51:02,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/7153277598eb4fa9b502c46535c00155 2024-12-08T05:51:02,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/7153277598eb4fa9b502c46535c00155 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155 2024-12-08T05:51:02,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155, entries=6, sequenceid=55, filesize=11.2 K 2024-12-08T05:51:02,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 29ms, sequenceid=55, compaction requested=true 2024-12-08T05:51:02,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:51:02,499 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-08T05:51:02,500 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:51:02,500 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e because midkey is the same as first or last row 2024-12-08T05:51:02,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store a0d875ebeb3b6e27843ae87b1cb3e2f8:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:51:02,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:51:02,500 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:51:02,501 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:51:02,501 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1541): a0d875ebeb3b6e27843ae87b1cb3e2f8/info is initiating minor compaction (all files) 2024-12-08T05:51:02,501 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a0d875ebeb3b6e27843ae87b1cb3e2f8/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:51:02,501 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155] into tmpdir=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp, totalSize=34.6 K 2024-12-08T05:51:02,501 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 14a8f2602b0f455284d17e599088f41e, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733637038458 2024-12-08T05:51:02,502 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3306c858d50453db89b88e303420cdb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733637045939 2024-12-08T05:51:02,502 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7153277598eb4fa9b502c46535c00155, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733637046344 2024-12-08T05:51:02,518 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a0d875ebeb3b6e27843ae87b1cb3e2f8#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:51:02,519 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/d1b9148f06bd475c81a41975b4dc6993 is 1080, key is row0002/info:/1733637038458/Put/seqid=0 2024-12-08T05:51:02,520 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,520 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:51:02,521 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741884_1068 2024-12-08T05:51:02,521 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741885_1069 (size=23502) 2024-12-08T05:51:02,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741885_1069 (size=23502) 2024-12-08T05:51:02,534 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/d1b9148f06bd475c81a41975b4dc6993 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/d1b9148f06bd475c81a41975b4dc6993 2024-12-08T05:51:02,541 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a0d875ebeb3b6e27843ae87b1cb3e2f8/info of a0d875ebeb3b6e27843ae87b1cb3e2f8 into d1b9148f06bd475c81a41975b4dc6993(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T05:51:02,541 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: 2024-12-08T05:51:02,541 INFO [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., storeName=a0d875ebeb3b6e27843ae87b1cb3e2f8/info, priority=13, startTime=1733637062500; duration=0sec 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/d1b9148f06bd475c81a41975b4dc6993 because midkey is the same as first or last row 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/d1b9148f06bd475c81a41975b4dc6993 because midkey is the same as first or last row 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/d1b9148f06bd475c81a41975b4dc6993 because midkey is the same as first or last row 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:51:02,542 DEBUG [RS:0;0d942cb2025d:36989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a0d875ebeb3b6e27843ae87b1cb3e2f8:info 2024-12-08T05:51:02,544 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-08T05:51:02,544 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:51:02,684 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:02,684 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:02,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:02,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:02,684 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:51:02,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:51:02,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1223048069, stopped=false 2024-12-08T05:51:02,684 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,40185,1733637024511 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:02,686 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:02,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:02,687 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:51:02,687 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:02,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:02,687 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,36989,1733637024575' ***** 2024-12-08T05:51:02,687 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:02,687 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,36443,1733637025646' ***** 2024-12-08T05:51:02,687 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:02,687 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:02,687 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:02,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:02,688 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:02,688 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(3091): Received CLOSE for a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:51:02,688 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:02,688 INFO [RS:1;0d942cb2025d:36443 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:02,688 INFO [RS:1;0d942cb2025d:36443 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,36989,1733637024575 2024-12-08T05:51:02,688 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,36443,1733637025646 2024-12-08T05:51:02,688 INFO [RS:1;0d942cb2025d:36443 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:02,688 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:02,688 INFO [RS:1;0d942cb2025d:36443 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;0d942cb2025d:36443. 2024-12-08T05:51:02,688 INFO [RS:0;0d942cb2025d:36989 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:36989. 
2024-12-08T05:51:02,688 DEBUG [RS:1;0d942cb2025d:36443 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:02,688 DEBUG [RS:1;0d942cb2025d:36443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:02,688 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a0d875ebeb3b6e27843ae87b1cb3e2f8, disabling compactions & flushes 2024-12-08T05:51:02,688 DEBUG [RS:0;0d942cb2025d:36989 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:02,689 DEBUG [RS:0;0d942cb2025d:36989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:02,689 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,36443,1733637025646; all regions closed. 
2024-12-08T05:51:02,689 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:51:02,689 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:51:02,689 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:02,689 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. after waiting 0 ms 2024-12-08T05:51:02,689 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:02,689 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:02,689 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:51:02,689 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:51:02,689 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-08T05:51:02,689 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:51:02,689 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1325): Online Regions={a0d875ebeb3b6e27843ae87b1cb3e2f8=TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:51:02,689 DEBUG [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a0d875ebeb3b6e27843ae87b1cb3e2f8 2024-12-08T05:51:02,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,690 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:02,690 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:02,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,690 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:02,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,690 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:02,690 DEBUG 
[RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:02,690 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-08T05:51:02,690 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,690 ERROR [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10-prefix:0d942cb2025d,36989,1733637024575.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,691 WARN [FSHLog-0-hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10-prefix:0d942cb2025d,36989,1733637024575.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,691 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,691 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36989%2C1733637024575.meta:.meta(num 1733637025501) roll requested 2024-12-08T05:51:02,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 2024-12-08T05:51:02,691 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36989%2C1733637024575.meta.1733637062691.meta 2024-12-08T05:51:02,691 WARN [IPC Server handler 2 on default port 46561 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 has not been closed. Lease recovery is in progress. RecoveryId = 1070 for block blk_1073741837_1013 2024-12-08T05:51:02,691 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 after 0ms 2024-12-08T05:51:02,694 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,694 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741886_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:37825,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 
2024-12-08T05:51:02,694 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741886_1071 2024-12-08T05:51:02,694 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/1a26cbac9b3c4a3ea6bfc58ad63ef720 is 1080, key is row0018/info:/1733637062471/Put/seqid=0 2024-12-08T05:51:02,694 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,702 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,702 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,702 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741887_1072 (size=11421) 2024-12-08T05:51:02,703 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637062691.meta 2024-12-08T05:51:02,703 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,703 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42141,DS-3efed6f0-3f49-41c2-9b48-24ce91b89e6a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741887_1072 (size=11421) 2024-12-08T05:51:02,703 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta 2024-12-08T05:51:02,704 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/1a26cbac9b3c4a3ea6bfc58ad63ef720 2024-12-08T05:51:02,704 WARN [IPC Server handler 3 on default port 46561 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741834_1010 2024-12-08T05:51:02,704 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36929:36929),(127.0.0.1/127.0.0.1:36455:36455)] 2024-12-08T05:51:02,704 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta is not closed yet, will try archiving it next time 2024-12-08T05:51:02,704 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta after 1ms 2024-12-08T05:51:02,710 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/.tmp/info/1a26cbac9b3c4a3ea6bfc58ad63ef720 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/1a26cbac9b3c4a3ea6bfc58ad63ef720 2024-12-08T05:51:02,717 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/1a26cbac9b3c4a3ea6bfc58ad63ef720, entries=6, sequenceid=65, filesize=11.2 K 2024-12-08T05:51:02,719 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 29ms, sequenceid=65, compaction requested=false 2024-12-08T05:51:02,719 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155] to archive 2024-12-08T05:51:02,720 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T05:51:02,721 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/info/894e9a5791654ceba0bf5555da8754fc is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8./info:regioninfo/1733637026145/Put/seqid=0 2024-12-08T05:51:02,722 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/6de45c20b1c24069bc6bfb6c49fca343 2024-12-08T05:51:02,724 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/04e0134a2aa44006a873e9e9756badd4 2024-12-08T05:51:02,724 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 
127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,724 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37825,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:51:02,724 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075 2024-12-08T05:51:02,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:33180 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data4]'}, localName='127.0.0.1:37825', datanodeUuid='efd53946-5941-41e2-9b0b-bda82ebd7dd4', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:02,724 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:33180 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-08T05:51:02,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:33180 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:37825:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33180 dst: /127.0.0.1:37825 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:02,725 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,725 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/14a8f2602b0f455284d17e599088f41e 2024-12-08T05:51:02,727 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/b5fcd51534d944238112dfdaa89730c4 2024-12-08T05:51:02,729 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/e3306c858d50453db89b88e303420cdb 2024-12-08T05:51:02,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is 
added to blk_1073741890_1076 (size=7089) 2024-12-08T05:51:02,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741890_1076 (size=7089) 2024-12-08T05:51:02,731 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/info/894e9a5791654ceba0bf5555da8754fc 2024-12-08T05:51:02,731 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/info/7153277598eb4fa9b502c46535c00155 2024-12-08T05:51:02,731 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0d942cb2025d:40185 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T05:51:02,732 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6de45c20b1c24069bc6bfb6c49fca343=10347, 04e0134a2aa44006a873e9e9756badd4=12506, 14a8f2602b0f455284d17e599088f41e=17994, b5fcd51534d944238112dfdaa89730c4=6027, e3306c858d50453db89b88e303420cdb=6027, 7153277598eb4fa9b502c46535c00155=11421] 2024-12-08T05:51:02,733 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T05:51:02,733 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T05:51:02,737 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a0d875ebeb3b6e27843ae87b1cb3e2f8/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-08T05:51:02,737 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 2024-12-08T05:51:02,737 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a0d875ebeb3b6e27843ae87b1cb3e2f8: Waiting for close lock at 1733637062688Running coprocessor pre-close hooks at 1733637062688Disabling compacts and flushes for region at 1733637062688Disabling writes for close at 1733637062689 (+1 ms)Obtaining lock to block concurrent updates at 1733637062689Preparing flush snapshotting stores in a0d875ebeb3b6e27843ae87b1cb3e2f8 at 1733637062689Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733637062689Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. at 1733637062690 (+1 ms)Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8/info: creating writer at 1733637062690Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8/info: appending metadata at 1733637062694 (+4 ms)Flushing a0d875ebeb3b6e27843ae87b1cb3e2f8/info: closing flushed file at 1733637062694Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f680f86: reopening flushed file at 1733637062709 (+15 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for a0d875ebeb3b6e27843ae87b1cb3e2f8 in 29ms, sequenceid=65, compaction requested=false at 1733637062719 (+10 ms)Writing region close event to WAL at 1733637062732 (+13 ms)Running coprocessor post-close hooks at 1733637062737 (+5 ms)Closed at 1733637062737 2024-12-08T05:51:02,738 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733637025761.a0d875ebeb3b6e27843ae87b1cb3e2f8. 
2024-12-08T05:51:02,754 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/ns/25d69fbddf11487db25da23250914854 is 43, key is default/ns:d/1733637025551/Put/seqid=0 2024-12-08T05:51:02,757 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35443 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,757 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37148 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6]'}, localName='127.0.0.1:39795', datanodeUuid='107d6efa-7b49-4521-92ae-23356ed2ace3', xmitsInProgress=0}:Exception transferring block BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077 to mirror 127.0.0.1:35443 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:02,757 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39795,DS-97e2bfc6-c424-4d2c-99c4-a2f9a381994f,DISK], DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 2024-12-08T05:51:02,757 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077 2024-12-08T05:51:02,757 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37148 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-08T05:51:02,758 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-134678083_22 at /127.0.0.1:37148 [Receiving block BP-1312343861-172.17.0.2-1733637023632:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:39795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37148 dst: /127.0.0.1:39795 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:02,758 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741892_1078 (size=5153) 2024-12-08T05:51:02,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741892_1078 (size=5153) 2024-12-08T05:51:02,763 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/ns/25d69fbddf11487db25da23250914854 2024-12-08T05:51:02,783 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/table/80df0dbb889e46b18eb8b64c19893e6a is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733637026158/Put/seqid=0 2024-12-08T05:51:02,785 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:02,785 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1312343861-172.17.0.2-1733637023632:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK], DatanodeInfoWithStorage[127.0.0.1:37825,DS-3ebf26fc-f51c-4143-acc9-f93fae9ced93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK]) is bad. 
2024-12-08T05:51:02,785 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1312343861-172.17.0.2-1733637023632:blk_1073741893_1079 2024-12-08T05:51:02,786 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35443,DS-04b517c0-4e1b-48ef-adae-ca58306a99bb,DISK] 2024-12-08T05:51:02,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741894_1080 (size=5424) 2024-12-08T05:51:02,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741894_1080 (size=5424) 2024-12-08T05:51:02,791 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/table/80df0dbb889e46b18eb8b64c19893e6a 2024-12-08T05:51:02,798 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/info/894e9a5791654ceba0bf5555da8754fc as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/info/894e9a5791654ceba0bf5555da8754fc 2024-12-08T05:51:02,805 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/info/894e9a5791654ceba0bf5555da8754fc, entries=10, sequenceid=11, filesize=6.9 K 2024-12-08T05:51:02,806 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/ns/25d69fbddf11487db25da23250914854 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/ns/25d69fbddf11487db25da23250914854 2024-12-08T05:51:02,812 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/ns/25d69fbddf11487db25da23250914854, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:51:02,813 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/.tmp/table/80df0dbb889e46b18eb8b64c19893e6a as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/table/80df0dbb889e46b18eb8b64c19893e6a 2024-12-08T05:51:02,819 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/table/80df0dbb889e46b18eb8b64c19893e6a, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T05:51:02,821 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-12-08T05:51:02,826 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T05:51:02,826 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:02,827 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:02,827 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637062690Running coprocessor pre-close hooks at 1733637062690Disabling compacts and flushes for region at 1733637062690Disabling writes for close at 1733637062690Obtaining lock to block concurrent updates at 1733637062690Preparing flush snapshotting stores in 1588230740 at 1733637062690Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733637062690Flushing stores of hbase:meta,,1.1588230740 at 1733637062704 (+14 ms)Flushing 1588230740/info: creating writer at 1733637062705 (+1 ms)Flushing 1588230740/info: appending metadata at 1733637062721 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733637062721Flushing 1588230740/ns: creating writer at 1733637062738 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733637062754 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733637062754Flushing 1588230740/table: creating writer at 1733637062768 (+14 ms)Flushing 1588230740/table: appending metadata at 1733637062783 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733637062783Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d73120f: reopening flushed file at 1733637062797 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@497d273: reopening flushed file at 1733637062805 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59d5102b: reopening flushed file at 1733637062813 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1733637062821 (+8 ms)Writing region close event to WAL at 1733637062822 (+1 ms)Running coprocessor post-close hooks at 1733637062826 (+4 ms)Closed at 1733637062827 (+1 ms) 2024-12-08T05:51:02,827 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:02,862 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.1733637044515 to hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs/0d942cb2025d%2C36989%2C1733637024575.1733637044515 2024-12-08T05:51:02,890 INFO [RS:0;0d942cb2025d:36989 {}] 
regionserver.HRegionServer(976): stopping server 0d942cb2025d,36989,1733637024575; all regions closed. 2024-12-08T05:51:02,890 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,890 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,890 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,891 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,891 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:02,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741888_1073 (size=825) 2024-12-08T05:51:02,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741888_1073 (size=825) 2024-12-08T05:51:02,962 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:03,026 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T05:51:03,026 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T05:51:03,097 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2bd61282[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39795, datanodeUuid=107d6efa-7b49-4521-92ae-23356ed2ace3, infoPort=36929, infoSecurePort=0, ipcPort=43951, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741861_1044 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:03,725 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:03,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@11e6dc08[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741828_1004 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:03,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a3e0d9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741832_1008 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:04,811 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@40a3e0d9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37825, datanodeUuid=efd53946-5941-41e2-9b0b-bda82ebd7dd4, infoPort=36455, infoSecurePort=0, ipcPort=37531, storageInfo=lv=-57;cid=testClusterID;nsid=341549530;c=1733637023632):Failed to transfer BP-1312343861-172.17.0.2-1733637023632:blk_1073741826_1002 to 127.0.0.1:35443 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:05,571 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T05:51:05,571 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-08T05:51:05,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-08T05:51:05,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:05,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:51:06,692 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 after 4001ms 2024-12-08T05:51:06,705 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta after 4002ms 2024-12-08T05:51:06,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:06,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741836_1012 (size=76) 2024-12-08T05:51:06,832 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7813b2da {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:42141,null,null]) java.net.ConnectException: Call From 0d942cb2025d/172.17.0.2 to localhost:40781 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T05:51:07,691 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T05:51:07,693 DEBUG [RS:1;0d942cb2025d:36443 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C36443%2C1733637025646:(num 1733637025862) 2024-12-08T05:51:07,693 DEBUG [RS:1;0d942cb2025d:36443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-08T05:51:07,693 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:07,693 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:07,694 INFO [RS:1;0d942cb2025d:36443 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:07,694 INFO [RS:1;0d942cb2025d:36443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36443 2024-12-08T05:51:07,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,36443,1733637025646 2024-12-08T05:51:07,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:07,696 INFO [RS:1;0d942cb2025d:36443 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:07,697 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,36443,1733637025646] 2024-12-08T05:51:07,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:07,700 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,36443,1733637025646 already deleted, retry=false 2024-12-08T05:51:07,700 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,36443,1733637025646 expired; onlineServers=1 2024-12-08T05:51:07,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:07,798 INFO [RS:1;0d942cb2025d:36443 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:07,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:07,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36443-0x101909f957d0002, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:07,798 INFO [RS:1;0d942cb2025d:36443 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,36443,1733637025646; zookeeper connection closed. 2024-12-08T05:51:07,798 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1dcdacf4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1dcdacf4 2024-12-08T05:51:07,891 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-08T05:51:07,894 DEBUG [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs 2024-12-08T05:51:07,894 INFO [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C36989%2C1733637024575.meta:.meta(num 1733637062691) 2024-12-08T05:51:07,895 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:07,895 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:07,895 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:07,895 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:07,895 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:07,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741881_1065 (size=15140) 2024-12-08T05:51:07,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741881_1065 (size=15140) 2024-12-08T05:51:07,900 DEBUG [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/oldWALs 2024-12-08T05:51:07,900 INFO [RS:0;0d942cb2025d:36989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C36989%2C1733637024575:(num 1733637062450) 2024-12-08T05:51:07,900 DEBUG [RS:0;0d942cb2025d:36989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:07,900 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:07,900 INFO [RS:0;0d942cb2025d:36989 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:07,900 INFO [RS:0;0d942cb2025d:36989 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:07,901 INFO [RS:0;0d942cb2025d:36989 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:07,901 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:07,901 INFO [RS:0;0d942cb2025d:36989 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36989 2024-12-08T05:51:07,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,36989,1733637024575 2024-12-08T05:51:07,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:07,903 INFO [RS:0;0d942cb2025d:36989 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:07,905 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,36989,1733637024575] 2024-12-08T05:51:07,907 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,36989,1733637024575 already deleted, retry=false 2024-12-08T05:51:07,907 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,36989,1733637024575 expired; onlineServers=0 2024-12-08T05:51:07,907 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,40185,1733637024511' ***** 2024-12-08T05:51:07,907 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:51:07,907 INFO [M:0;0d942cb2025d:40185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:07,907 INFO [M:0;0d942cb2025d:40185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:07,907 DEBUG [M:0;0d942cb2025d:40185 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:51:07,907 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:51:07,907 DEBUG [M:0;0d942cb2025d:40185 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:51:07,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637024849 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637024849,5,FailOnTimeoutGroup] 2024-12-08T05:51:07,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637024849 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637024849,5,FailOnTimeoutGroup] 2024-12-08T05:51:07,908 INFO [M:0;0d942cb2025d:40185 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:07,908 INFO [M:0;0d942cb2025d:40185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:07,908 DEBUG [M:0;0d942cb2025d:40185 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:51:07,908 INFO [M:0;0d942cb2025d:40185 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:51:07,908 INFO [M:0;0d942cb2025d:40185 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:07,908 INFO [M:0;0d942cb2025d:40185 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:51:07,908 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:51:07,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:07,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:07,911 DEBUG [M:0;0d942cb2025d:40185 {}] zookeeper.ZKUtil(347): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:51:07,911 WARN [M:0;0d942cb2025d:40185 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:51:07,912 INFO [M:0;0d942cb2025d:40185 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/.lastflushedseqids 2024-12-08T05:51:07,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741895_1081 (size=130) 2024-12-08T05:51:07,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741895_1081 (size=130) 2024-12-08T05:51:07,920 INFO [M:0;0d942cb2025d:40185 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:51:07,920 INFO [M:0;0d942cb2025d:40185 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:51:07,920 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:07,920 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:07,920 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:07,920 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:07,920 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:07,920 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-08T05:51:07,938 DEBUG [M:0;0d942cb2025d:40185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4078dcb59ecd47e6baf103bca0d5530f is 82, key is hbase:meta,,1/info:regioninfo/1733637025531/Put/seqid=0 2024-12-08T05:51:07,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741896_1082 (size=5672) 2024-12-08T05:51:07,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741896_1082 (size=5672) 2024-12-08T05:51:07,947 INFO [M:0;0d942cb2025d:40185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4078dcb59ecd47e6baf103bca0d5530f 2024-12-08T05:51:07,972 DEBUG [M:0;0d942cb2025d:40185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b72a9239d4347298ee49c318bc974f9 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637026164/Put/seqid=0 2024-12-08T05:51:07,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741897_1083 (size=6255) 2024-12-08T05:51:07,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741897_1083 (size=6255) 2024-12-08T05:51:07,978 INFO [M:0;0d942cb2025d:40185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b72a9239d4347298ee49c318bc974f9 2024-12-08T05:51:07,984 INFO [M:0;0d942cb2025d:40185 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b72a9239d4347298ee49c318bc974f9 2024-12-08T05:51:07,999 DEBUG [M:0;0d942cb2025d:40185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf7681677c7d40cf83b1313748299c7c is 69, key is 0d942cb2025d,36443,1733637025646/rs:state/1733637025700/Put/seqid=0 2024-12-08T05:51:08,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741898_1084 (size=5224) 2024-12-08T05:51:08,005 INFO [RS:0;0d942cb2025d:36989 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:08,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:08,005 INFO [RS:0;0d942cb2025d:36989 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,36989,1733637024575; zookeeper connection closed. 2024-12-08T05:51:08,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36989-0x101909f957d0001, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:08,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741898_1084 (size=5224) 2024-12-08T05:51:08,005 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1322adf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1322adf1 2024-12-08T05:51:08,005 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-08T05:51:08,006 INFO [M:0;0d942cb2025d:40185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf7681677c7d40cf83b1313748299c7c 2024-12-08T05:51:08,032 DEBUG [M:0;0d942cb2025d:40185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/57ced3defd7c4168813f1701f4a82f3a is 52, key is load_balancer_on/state:d/1733637025628/Put/seqid=0 2024-12-08T05:51:08,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741899_1085 (size=5056) 2024-12-08T05:51:08,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741899_1085 (size=5056) 2024-12-08T05:51:08,038 INFO [M:0;0d942cb2025d:40185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/57ced3defd7c4168813f1701f4a82f3a 2024-12-08T05:51:08,045 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4078dcb59ecd47e6baf103bca0d5530f as 
hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4078dcb59ecd47e6baf103bca0d5530f 2024-12-08T05:51:08,052 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4078dcb59ecd47e6baf103bca0d5530f, entries=8, sequenceid=60, filesize=5.5 K 2024-12-08T05:51:08,053 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1b72a9239d4347298ee49c318bc974f9 as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b72a9239d4347298ee49c318bc974f9 2024-12-08T05:51:08,058 INFO [M:0;0d942cb2025d:40185 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1b72a9239d4347298ee49c318bc974f9 2024-12-08T05:51:08,058 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1b72a9239d4347298ee49c318bc974f9, entries=6, sequenceid=60, filesize=6.1 K 2024-12-08T05:51:08,059 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf7681677c7d40cf83b1313748299c7c as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf7681677c7d40cf83b1313748299c7c 2024-12-08T05:51:08,064 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf7681677c7d40cf83b1313748299c7c, entries=2, sequenceid=60, filesize=5.1 K 2024-12-08T05:51:08,065 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/57ced3defd7c4168813f1701f4a82f3a as hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/57ced3defd7c4168813f1701f4a82f3a 2024-12-08T05:51:08,070 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/57ced3defd7c4168813f1701f4a82f3a, entries=1, sequenceid=60, filesize=4.9 K 2024-12-08T05:51:08,072 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=60, compaction requested=false 2024-12-08T05:51:08,081 INFO [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:51:08,081 DEBUG [M:0;0d942cb2025d:40185 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637067920Disabling compacts and flushes for region at 1733637067920Disabling writes for close at 1733637067920Obtaining lock to block concurrent updates at 1733637067920Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637067920Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733637067921 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733637067921Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637067922 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637067938 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637067938Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637067955 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637067971 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637067971Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637067984 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637067999 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637067999Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637068014 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637068032 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637068032Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@90202bd: reopening flushed file at 1733637068044 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@170ae1c6: reopening flushed file at 1733637068052 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@673552f8: reopening flushed file at 1733637068058 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@669c3cf2: reopening flushed file at 1733637068064 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=60, compaction requested=false at 1733637068072 (+8 ms)Writing region close event to WAL at 1733637068081 (+9 ms)Closed at 1733637068081 2024-12-08T05:51:08,082 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:08,082 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:08,082 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:08,082 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:08,082 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:08,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37825 is added to blk_1073741879_1062 (size=1045) 2024-12-08T05:51:08,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39795 is added to blk_1073741879_1062 (size=1045) 2024-12-08T05:51:08,085 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:08,085 INFO [M:0;0d942cb2025d:40185 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:51:08,085 INFO [M:0;0d942cb2025d:40185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40185 2024-12-08T05:51:08,086 INFO [M:0;0d942cb2025d:40185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:08,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:08,187 INFO [M:0;0d942cb2025d:40185 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:08,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40185-0x101909f957d0000, quorum=127.0.0.1:49789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:08,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@507ee43d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:08,190 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5319e63c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:08,190 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:08,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a864bf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:08,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ed2ea19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:08,192 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:08,192 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:51:08,192 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid efd53946-5941-41e2-9b0b-bda82ebd7dd4) service to localhost/127.0.0.1:46561 2024-12-08T05:51:08,192 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:08,192 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@25c31f75 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:42141,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:40781 , LocalHost:localPort 0d942cb2025d/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-08T05:51:08,192 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@25c31f75 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1312343861-172.17.0.2-1733637023632:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37825,null,null], DatanodeInfoWithStorage[127.0.0.1:42141,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1312343861-172.17.0.2-1733637023632 2024-12-08T05:51:08,193 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@25c31f75 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42141,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1312343861-172.17.0.2-1733637023632 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:08,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data3/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:08,193 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@25c31f75 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37825,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1312343861-172.17.0.2-1733637023632 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:08,193 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@25c31f75 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42141,null,null], DatanodeInfoWithStorage[127.0.0.1:37825,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1312343861-172.17.0.2-1733637023632:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:42141,null,null], DatanodeInfoWithStorage[127.0.0.1:37825,null,null]] 2024-12-08T05:51:08,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data4/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:08,194 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:08,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@431f0ae0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:08,196 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a7d29fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:08,196 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:08,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@168c0f83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:08,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43b67cb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:08,197 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:08,197 WARN [BP-1312343861-172.17.0.2-1733637023632 heartbeating to localhost/127.0.0.1:46561 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1312343861-172.17.0.2-1733637023632 (Datanode Uuid 107d6efa-7b49-4521-92ae-23356ed2ace3) service to localhost/127.0.0.1:46561 2024-12-08T05:51:08,197 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
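The block-recovery failures above were triggered by an interrupt while the IPC client slept between connection retries; the exception message names the policy in effect, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS). A hedged sketch of how a policy of that shape is built with Hadoop's org.apache.hadoop.io.retry.RetryPolicies factory; the values are copied from the log message, and the proxy wiring that actually applies the policy to the inter-datanode RPC is omitted.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

// Illustrative sketch only: constructs the fixed-sleep retry policy named in
// the stack trace above. Interrupting a thread while this policy sleeps is
// what surfaces as the InterruptedIOException logged by the recovery worker.
public final class RecoveryRetryPolicySketch {
  public static void main(String[] args) {
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
    System.out.println("policy in effect: " + policy);
  }
}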
2024-12-08T05:51:08,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:08,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data5/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:08,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/cluster_ad2183ec-0874-c6b8-5c7f-1444c5a18074/data/data6/current/BP-1312343861-172.17.0.2-1733637023632 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:08,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:08,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf66d4d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:08,205 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24f9a05e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:08,205 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:08,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e510a6a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:08,206 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c83390f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:08,215 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:51:08,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:51:08,258 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 81) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fa1e4bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fa1e4bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:46561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46561 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:46561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44287 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Parameter Sending Thread for localhost/127.0.0.1:46561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:46561 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46561 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:46561 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46561 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 404) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=158 (was 172), ProcessCount=11 (was 11), AvailableMemoryMB=7433 (was 8445) 2024-12-08T05:51:08,266 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=158, ProcessCount=11, AvailableMemoryMB=7433 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.log.dir so I do NOT create it in target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/638ac2b2-592f-a1dc-2b6d-260481bcb794/hadoop.tmp.dir so I do NOT create it in target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98, deleteOnExit=true 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/test.cache.data in system properties and HBase conf 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:51:08,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:51:08,267 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:51:08,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:51:08,271 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:51:08,289 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:51:08,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:08,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:08,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:08,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:08,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:08,412 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:08,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:08,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ad19754{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:08,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f6eeaf0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:08,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b056d01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-34161-hadoop-hdfs-3_4_1-tests_jar-_-any-6378975556700705760/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:08,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24dc4202{HTTP/1.1, (http/1.1)}{localhost:34161} 2024-12-08T05:51:08,534 INFO [Time-limited test {}] server.Server(415): Started @148718ms 2024-12-08T05:51:08,547 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:51:08,623 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:08,626 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:08,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:08,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:08,627 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:08,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65985ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:08,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ccc2c47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:08,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-08T05:51:08,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:08,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@416b876f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-43255-hadoop-hdfs-3_4_1-tests_jar-_-any-1623466383066270091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:08,743 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25c86e24{HTTP/1.1, (http/1.1)}{localhost:43255} 2024-12-08T05:51:08,744 INFO [Time-limited test {}] server.Server(415): Started @148928ms 2024-12-08T05:51:08,745 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:08,789 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:08,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:08,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:08,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:08,795 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:08,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@778fe5bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:08,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a74eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:08,852 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data1/current/BP-1308812349-172.17.0.2-1733637068326/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:08,852 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data2/current/BP-1308812349-172.17.0.2-1733637068326/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:08,869 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:51:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x877eb769e0399573 with lease ID 0xe044214c10bc1ab3: Processing first storage report for DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3 from datanode DatanodeRegistration(127.0.0.1:41681, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=46757, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326) 2024-12-08T05:51:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x877eb769e0399573 with lease ID 0xe044214c10bc1ab3: from storage DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3 node DatanodeRegistration(127.0.0.1:41681, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=46757, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x877eb769e0399573 with lease ID 0xe044214c10bc1ab3: Processing first storage report for DS-c0829470-c149-4e17-a594-a6372c390c46 from datanode DatanodeRegistration(127.0.0.1:41681, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=46757, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326) 2024-12-08T05:51:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x877eb769e0399573 with lease ID 0xe044214c10bc1ab3: from storage DS-c0829470-c149-4e17-a594-a6372c390c46 node DatanodeRegistration(127.0.0.1:41681, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=46757, infoSecurePort=0, ipcPort=33297, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:08,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2703b5c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-45391-hadoop-hdfs-3_4_1-tests_jar-_-any-2881851476460471373/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:08,932 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@172ad5fa{HTTP/1.1, (http/1.1)}{localhost:45391} 2024-12-08T05:51:08,932 INFO [Time-limited test {}] server.Server(415): Started @149116ms 2024-12-08T05:51:08,933 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
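The startup recorded above (StartMiniClusterOption with one master, one region server, two data nodes and one ZooKeeper server, followed by DFS, datanodes and their web UIs) is driven by the HBaseTestingUtil API named in these entries. For reference, a minimal sketch of starting and stopping the same kind of minicluster from a test; the option values are copied from the log, while the wrapping class and the mention of getConnection()/getAdmin() are illustrative and may differ between HBase versions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Mirrors the option printed in the log: 1 master, 1 region server, 2 data nodes, 1 ZK server.
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region server
        try {
          // ... run test logic against util.getConnection() / util.getAdmin() ...
        } finally {
          util.shutdownMiniCluster();    // tears everything down and removes the test directories
        }
      }
    }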
2024-12-08T05:51:09,169 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data4/current/BP-1308812349-172.17.0.2-1733637068326/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:09,169 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data3/current/BP-1308812349-172.17.0.2-1733637068326/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:09,187 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fef338f6046d617 with lease ID 0xe044214c10bc1ab4: Processing first storage report for DS-17a3c984-c607-4535-a9ef-b49c60ee4fab from datanode DatanodeRegistration(127.0.0.1:32899, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=35061, infoSecurePort=0, ipcPort=33661, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326) 2024-12-08T05:51:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fef338f6046d617 with lease ID 0xe044214c10bc1ab4: from storage DS-17a3c984-c607-4535-a9ef-b49c60ee4fab node DatanodeRegistration(127.0.0.1:32899, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=35061, infoSecurePort=0, ipcPort=33661, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fef338f6046d617 with lease ID 0xe044214c10bc1ab4: Processing first storage report for DS-2708d9c2-7d44-485b-a284-da2c11fc0327 from datanode DatanodeRegistration(127.0.0.1:32899, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=35061, infoSecurePort=0, ipcPort=33661, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326) 2024-12-08T05:51:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fef338f6046d617 with lease ID 0xe044214c10bc1ab4: from storage DS-2708d9c2-7d44-485b-a284-da2c11fc0327 node DatanodeRegistration(127.0.0.1:32899, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=35061, infoSecurePort=0, ipcPort=33661, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:09,261 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8 2024-12-08T05:51:09,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/zookeeper_0, clientPort=61807, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:51:09,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61807 2024-12-08T05:51:09,265 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:09,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:09,280 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4 with version=8 2024-12-08T05:51:09,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:51:09,282 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:51:09,283 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:09,284 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36481 2024-12-08T05:51:09,286 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36481 connecting to ZooKeeper ensemble=127.0.0.1:61807 2024-12-08T05:51:09,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364810x0, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:09,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36481-0x10190a0445e0000 connected 2024-12-08T05:51:09,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:09,337 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4, hbase.cluster.distributed=false 2024-12-08T05:51:09,340 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:09,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36481 2024-12-08T05:51:09,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36481 2024-12-08T05:51:09,343 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36481 2024-12-08T05:51:09,344 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36481 2024-12-08T05:51:09,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36481 2024-12-08T05:51:09,369 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:09,369 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,369 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,370 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:09,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:09,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:09,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:09,370 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:09,371 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43211 2024-12-08T05:51:09,372 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43211 connecting to ZooKeeper ensemble=127.0.0.1:61807 2024-12-08T05:51:09,373 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,376 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432110x0, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:09,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432110x0, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:09,382 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43211-0x10190a0445e0001 connected 2024-12-08T05:51:09,382 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:09,384 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:09,385 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:09,386 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:09,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43211 2024-12-08T05:51:09,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43211 2024-12-08T05:51:09,389 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43211 2024-12-08T05:51:09,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43211 2024-12-08T05:51:09,390 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43211 2024-12-08T05:51:09,404 
DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:36481 2024-12-08T05:51:09,407 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:09,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:09,409 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:09,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,414 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:51:09,414 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,36481,1733637069282 from backup master directory 2024-12-08T05:51:09,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:09,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:09,416 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
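The entries around the master and region server startup show both processes connecting to the ZooKeeper ensemble at 127.0.0.1:61807 and setting watchers on znodes under /hbase (/hbase/master, /hbase/running, /hbase/backup-masters). A small sketch, using the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher, of inspecting those same znodes; the ensemble address and paths come from the log, everything else is illustrative:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZNodeProbe {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Ensemble address taken from the log above; adjust for your own cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61807", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // /hbase/master is created by the active master once it has registered.
        Stat master = zk.exists("/hbase/master", false);
        System.out.println("/hbase/master " + (master == null ? "absent" : "present"));
        // /hbase/running is created when the cluster-up flag is set (see the entries below).
        Stat running = zk.exists("/hbase/running", false);
        System.out.println("/hbase/running " + (running == null ? "absent" : "present"));
        zk.close();
      }
    }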
2024-12-08T05:51:09,416 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,421 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/hbase.id] with ID: f24efdfe-3881-4cd5-bf24-a9b9085565db 2024-12-08T05:51:09,421 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/.tmp/hbase.id 2024-12-08T05:51:09,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:09,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:09,432 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/.tmp/hbase.id]:[hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/hbase.id] 2024-12-08T05:51:09,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:09,447 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:51:09,448 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T05:51:09,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:09,458 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:09,459 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:51:09,459 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:09,468 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store 2024-12-08T05:51:09,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:09,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:09,476 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:09,477 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:09,477 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:09,477 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:09,477 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:09,477 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:09,477 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
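The MasterRegion entries above print the full schema of the master-local 'master:store' region (families info, proc, rs and state). That region is created internally by the master rather than through the client API, but the same attributes can be expressed with the public descriptor builders. A sketch reproducing the 'info' family as logged (3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks), with the other families left at the defaults the log also shows (1 version, ROW bloom, 64 KB blocks):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();
        // proc, rs and state keep the defaults shown in the log.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
        ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
        ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .setColumnFamily(rs)
            .setColumnFamily(state)
            .build();
      }
    }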
2024-12-08T05:51:09,477 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637069477Disabling compacts and flushes for region at 1733637069477Disabling writes for close at 1733637069477Writing region close event to WAL at 1733637069477Closed at 1733637069477 2024-12-08T05:51:09,478 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/.initializing 2024-12-08T05:51:09,478 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,481 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C36481%2C1733637069282, suffix=, logDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282, archiveDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/oldWALs, maxLogs=10 2024-12-08T05:51:09,481 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36481%2C1733637069282.1733637069481 2024-12-08T05:51:09,485 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 2024-12-08T05:51:09,490 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46757:46757),(127.0.0.1/127.0.0.1:35061:35061)] 2024-12-08T05:51:09,494 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:09,494 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:09,495 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,495 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:51:09,498 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:09,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:51:09,500 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:09,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:51:09,502 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:09,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:51:09,504 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:09,504 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,505 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,505 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,506 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,506 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,507 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:09,508 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:09,514 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:09,515 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689276, jitterRate=-0.12354066967964172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:09,516 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637069495Initializing all the Stores at 1733637069496 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637069496Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637069496Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637069496Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637069496Cleaning up temporary data from old regions at 1733637069506 (+10 ms)Region opened successfully at 1733637069516 (+10 ms) 2024-12-08T05:51:09,516 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:51:09,520 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@401cda9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:09,521 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:51:09,522 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:51:09,522 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:51:09,522 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:51:09,522 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:51:09,523 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:51:09,523 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:51:09,528 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:51:09,528 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:51:09,530 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:51:09,530 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:51:09,531 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:51:09,532 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:51:09,533 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:51:09,536 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:51:09,539 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:51:09,540 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:51:09,542 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:51:09,544 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:51:09,546 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:51:09,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:09,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:09,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,549 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,36481,1733637069282, sessionid=0x10190a0445e0000, setting cluster-up flag (Was=false) 2024-12-08T05:51:09,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,565 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:51:09,566 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:09,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:51:09,579 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,36481,1733637069282 2024-12-08T05:51:09,588 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:51:09,590 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:09,591 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:51:09,591 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:51:09,591 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,36481,1733637069282 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:09,593 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:51:09,594 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(746): ClusterId : f24efdfe-3881-4cd5-bf24-a9b9085565db 2024-12-08T05:51:09,594 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:09,597 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:09,597 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:09,599 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:09,599 DEBUG [RS:0;0d942cb2025d:43211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d6e15ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:09,604 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:09,604 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:51:09,606 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,606 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:51:09,607 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637099607 2024-12-08T05:51:09,608 INFO 
[master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:51:09,608 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:51:09,608 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:51:09,608 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:51:09,608 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:51:09,608 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:51:09,616 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,617 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:51:09,617 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:51:09,617 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:51:09,618 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637069618,5,FailOnTimeoutGroup] 2024-12-08T05:51:09,618 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637069618,5,FailOnTimeoutGroup] 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,618 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:09,630 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:51:09,630 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4 2024-12-08T05:51:09,631 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:43211 2024-12-08T05:51:09,631 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:09,631 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:09,631 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-08T05:51:09,633 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,36481,1733637069282 with port=43211, startcode=1733637069369 2024-12-08T05:51:09,633 DEBUG [RS:0;0d942cb2025d:43211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:09,640 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36091, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:09,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36481 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36481 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,643 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4 2024-12-08T05:51:09,643 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34501 2024-12-08T05:51:09,643 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:09,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:09,647 DEBUG [RS:0;0d942cb2025d:43211 {}] zookeeper.ZKUtil(111): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,647 WARN [RS:0;0d942cb2025d:43211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T05:51:09,647 INFO [RS:0;0d942cb2025d:43211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:09,647 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:09,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:09,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:09,661 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,43211,1733637069369] 2024-12-08T05:51:09,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:09,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:09,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:09,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:09,668 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:09,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 
0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:09,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:09,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:09,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:09,672 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:09,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:09,672 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:09,674 INFO [RS:0;0d942cb2025d:43211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:09,674 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:09,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:09,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:09,674 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:09,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:09,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:09,675 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:09,675 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:09,675 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,675 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,675 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,675 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,675 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:09,676 DEBUG [RS:0;0d942cb2025d:43211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:09,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:09,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:09,678 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:51:09,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:09,687 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,687 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,43211,1733637069369-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:09,687 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882379, jitterRate=0.12200410664081573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:51:09,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637069657Initializing all the Stores at 1733637069658 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637069658Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637069664 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637069664Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637069664Cleaning up temporary data from old regions at 1733637069678 (+14 ms)Region opened successfully at 1733637069688 (+10 ms) 2024-12-08T05:51:09,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:09,689 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:09,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:09,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:09,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:09,696 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:09,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637069688Disabling compacts and flushes for region at 1733637069688Disabling writes for close at 1733637069689 (+1 ms)Writing region close event to WAL at 1733637069696 (+7 ms)Closed at 1733637069696 2024-12-08T05:51:09,698 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:09,698 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:51:09,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:51:09,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:09,703 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:09,705 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:51:09,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:09,714 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:09,714 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,43211,1733637069369-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,714 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,714 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.Replication(171): 0d942cb2025d,43211,1733637069369 started 2024-12-08T05:51:09,737 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:09,737 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,43211,1733637069369, RpcServer on 0d942cb2025d/172.17.0.2:43211, sessionid=0x10190a0445e0001 2024-12-08T05:51:09,737 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:09,737 DEBUG [RS:0;0d942cb2025d:43211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,738 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,43211,1733637069369' 2024-12-08T05:51:09,738 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:09,738 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,43211,1733637069369 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,43211,1733637069369' 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:09,739 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:51:09,740 DEBUG [RS:0;0d942cb2025d:43211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:51:09,740 INFO [RS:0;0d942cb2025d:43211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:51:09,740 INFO [RS:0;0d942cb2025d:43211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-08T05:51:09,843 INFO [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C43211%2C1733637069369, suffix=, logDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369, archiveDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs, maxLogs=32 2024-12-08T05:51:09,844 INFO [RS:0;0d942cb2025d:43211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:09,855 WARN [0d942cb2025d:36481 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T05:51:09,856 INFO [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:09,867 DEBUG [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35061:35061),(127.0.0.1/127.0.0.1:46757:46757)] 2024-12-08T05:51:10,105 DEBUG [0d942cb2025d:36481 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:51:10,106 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:10,108 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,43211,1733637069369, state=OPENING 2024-12-08T05:51:10,110 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:51:10,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:10,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:10,112 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:10,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,43211,1733637069369}] 2024-12-08T05:51:10,112 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:10,112 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:10,266 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:51:10,269 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56103, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=AdminService 2024-12-08T05:51:10,274 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:51:10,274 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:10,276 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C43211%2C1733637069369.meta, suffix=.meta, logDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369, archiveDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs, maxLogs=32 2024-12-08T05:51:10,277 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta 2024-12-08T05:51:10,289 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta 2024-12-08T05:51:10,293 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46757:46757),(127.0.0.1/127.0.0.1:35061:35061)] 2024-12-08T05:51:10,296 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:10,296 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:51:10,296 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:51:10,296 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T05:51:10,297 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:51:10,297 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:10,297 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:51:10,297 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:51:10,300 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:10,301 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:10,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:10,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:10,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:10,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:10,303 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:10,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:10,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:10,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:10,305 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:10,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:10,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:10,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:10,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:10,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
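The repeated CompactionConfiguration(183) lines print the same policy once per meta column family (info, ns, rep_barrier, table). As a rough, hedged mapping rather than anything taken from the test, the logged values correspond to the standard compaction keys shown below; the defaults are assumptions matching this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: keys that normally drive the numbers CompactionConfiguration prints above.
public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long minCompactSize  = conf.getLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    int minFilesToCompact = conf.getInt("hbase.hstore.compaction.min", 3);                       // minFilesToCompact:3
    int maxFilesToCompact = conf.getInt("hbase.hstore.compaction.max", 10);                      // maxFilesToCompact:10
    float ratio          = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.2
    float offPeakRatio   = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.0
    long majorPeriodMs   = conf.getLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period (7 days)
    float majorJitter    = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.5
    System.out.printf("min=%d files=[%d,%d) ratio=%.1f offPeak=%.1f major=%dms jitter=%.1f%n",
        minCompactSize, minFilesToCompact, maxFilesToCompact, ratio, offPeakRatio,
        majorPeriodMs, majorJitter);
  }
}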
2024-12-08T05:51:10,307 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:10,308 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740 2024-12-08T05:51:10,310 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740 2024-12-08T05:51:10,311 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:10,312 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:10,312 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:51:10,314 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:10,315 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818260, jitterRate=0.040472373366355896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:51:10,315 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:51:10,316 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637070297Writing region info on filesystem at 1733637070297Initializing all the Stores at 1733637070298 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637070298Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637070300 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637070300Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637070300Cleaning up temporary data from old regions at 1733637070312 (+12 ms)Running coprocessor post-open hooks at 1733637070315 (+3 ms)Region opened successfully at 1733637070316 (+1 ms) 2024-12-08T05:51:10,318 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637070266 2024-12-08T05:51:10,320 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:51:10,320 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:51:10,321 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:10,322 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,43211,1733637069369, state=OPEN 2024-12-08T05:51:10,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:10,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:10,328 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:10,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:10,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:10,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:51:10,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,43211,1733637069369 in 216 msec 2024-12-08T05:51:10,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:51:10,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 632 msec 2024-12-08T05:51:10,335 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:10,335 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:51:10,337 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:10,337 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,43211,1733637069369, seqNum=-1] 2024-12-08T05:51:10,337 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:10,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51321, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:10,344 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 753 msec 2024-12-08T05:51:10,344 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637070344, completionTime=-1 2024-12-08T05:51:10,344 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:51:10,344 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637130346 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637190346 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:10,346 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:10,347 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:10,347 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:36481, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:10,347 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:10,347 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:10,349 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.935sec 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:10,351 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:51:10,356 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:51:10,357 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:51:10,357 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,36481,1733637069282-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
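A few lines above, MetaTableLocator publishes the meta location to /hbase/meta-region-server and both ZKWatcher instances receive a NodeDataChanged event for that path. The snippet below only illustrates that watch mechanism with the plain ZooKeeper client against the quorum shown in the log; HBase itself stores a protobuf-encoded server name at the znode and decodes it via MetaTableLocator, which this sketch does not attempt.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: a bare data watch on the znode named in the ZKWatcher lines above.
public class MetaZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61807", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged) {
        System.out.println("meta location changed: " + event.getPath());
      }
    });
    // Register a data watch on the same path the log reports as CHANGED.
    zk.getData("/hbase/meta-region-server", true, null);
    Thread.sleep(60_000); // keep the process alive long enough to observe an event
    zk.close();
  }
}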
2024-12-08T05:51:10,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1364a50e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:10,394 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,36481,-1 for getting cluster id 2024-12-08T05:51:10,394 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:51:10,396 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f24efdfe-3881-4cd5-bf24-a9b9085565db' 2024-12-08T05:51:10,397 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:51:10,397 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f24efdfe-3881-4cd5-bf24-a9b9085565db" 2024-12-08T05:51:10,397 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29809f10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:10,397 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,36481,-1] 2024-12-08T05:51:10,397 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:51:10,398 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:10,400 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:51:10,400 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2824314f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:10,401 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:10,402 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,43211,1733637069369, seqNum=-1] 2024-12-08T05:51:10,403 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:10,404 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:10,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,36481,1733637069282 2024-12-08T05:51:10,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:10,410 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:51:10,411 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-08T05:51:10,411 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-08T05:51:10,411 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:51:10,412 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,36481,1733637069282 2024-12-08T05:51:10,412 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68a66834 2024-12-08T05:51:10,412 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:51:10,414 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:51:10,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T05:51:10,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
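The two TableDescriptorChecker warnings fire because the test runs with a deliberately tiny region max file size and memstore flush size, presumably to force frequent splits and flushes during log rolling. A hypothetical snippet, not the actual test code, that would produce exactly the numbers in those warnings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical settings matching the warning text above; not taken from TestLogRolling itself.
public class TinySizesSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 768 * 1024);      // 786432 bytes -> MAX_FILESIZE warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024); // 8192 bytes  -> MEMSTORE_FLUSHSIZE warning
    // With the default hbase.table.sanity.checks=true such values can be rejected outright;
    // tests typically relax the check so the create proceeds and only warnings are logged.
    conf.setBoolean("hbase.table.sanity.checks", false);
  }
}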
2024-12-08T05:51:10,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:10,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T05:51:10,419 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:51:10,419 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:10,420 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-08T05:51:10,421 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:51:10,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:10,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741835_1011 (size=395) 2024-12-08T05:51:10,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741835_1011 (size=395) 2024-12-08T05:51:10,442 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 240bf35d9c17fdaa81416c54f511b572, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4 2024-12-08T05:51:10,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32899 is added to blk_1073741836_1012 (size=78) 2024-12-08T05:51:10,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41681 is added to blk_1073741836_1012 (size=78) 2024-12-08T05:51:10,460 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:10,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 240bf35d9c17fdaa81416c54f511b572, disabling compactions & flushes 2024-12-08T05:51:10,460 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. after waiting 0 ms 2024-12-08T05:51:10,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,460 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,460 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 240bf35d9c17fdaa81416c54f511b572: Waiting for close lock at 1733637070460Disabling compacts and flushes for region at 1733637070460Disabling writes for close at 1733637070460Writing region close event to WAL at 1733637070460Closed at 1733637070460 2024-12-08T05:51:10,461 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:51:10,462 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733637070462"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637070462"}]},"ts":"1733637070462"} 2024-12-08T05:51:10,464 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
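CreateTableProcedure (pid=4) is the server-side procedure behind an ordinary client createTable call. Purely as a minimal sketch, not the test's own code, an equivalent client-side request with the current HBase API would mirror the single 'info' family descriptor logged above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a client-side create matching the descriptor in the HMaster$4(2454) line above.
public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)                          // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)          // BLOOMFILTER => 'ROW'
          .setDataBlockEncoding(DataBlockEncoding.NONE)
          .setBlocksize(64 * 1024)                    // BLOCKSIZE => 64KB
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(info)
          .build();
      admin.createTable(table);                       // drives the CreateTableProcedure seen above
    }
  }
}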
2024-12-08T05:51:10,465 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:51:10,466 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637070465"}]},"ts":"1733637070465"} 2024-12-08T05:51:10,468 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-08T05:51:10,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240bf35d9c17fdaa81416c54f511b572, ASSIGN}] 2024-12-08T05:51:10,470 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240bf35d9c17fdaa81416c54f511b572, ASSIGN 2024-12-08T05:51:10,471 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240bf35d9c17fdaa81416c54f511b572, ASSIGN; state=OFFLINE, location=0d942cb2025d,43211,1733637069369; forceNewPlan=false, retain=false 2024-12-08T05:51:10,621 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=240bf35d9c17fdaa81416c54f511b572, regionState=OPENING, regionLocation=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:10,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240bf35d9c17fdaa81416c54f511b572, ASSIGN because future has completed 2024-12-08T05:51:10,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 240bf35d9c17fdaa81416c54f511b572, server=0d942cb2025d,43211,1733637069369}] 2024-12-08T05:51:10,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:10,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:10,784 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 240bf35d9c17fdaa81416c54f511b572, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:10,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:10,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,787 INFO [StoreOpener-240bf35d9c17fdaa81416c54f511b572-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,789 INFO [StoreOpener-240bf35d9c17fdaa81416c54f511b572-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 240bf35d9c17fdaa81416c54f511b572 columnFamilyName info 2024-12-08T05:51:10,789 DEBUG [StoreOpener-240bf35d9c17fdaa81416c54f511b572-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
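Stepping back to the Close-WAL-Writer-0 warnings interleaved above: RecoverLeaseFSUtils keeps trying to recover the lease on two WAL files left over from an earlier mini-cluster instance (the hdfs://localhost:46561 paths), but every reflective isFileClosed probe throws "Filesystem closed" because that DFSClient has already been shut down, so the same warning repeats roughly once per second. The sketch below shows only the underlying HDFS recoverLease/isFileClosed polling pattern the utility wraps; the timeout and pause are placeholder assumptions, not HBase's actual implementation.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the generic lease-recovery loop: ask the NameNode to recover the lease on an
// open WAL file, then poll until the file is reported closed or a deadline passes.
public class LeaseRecoverySketch {
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal);   // may return false on the first attempt
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000);                        // HBase uses a configurable pause; 1s is a stand-in
      recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
    }
    return recovered;
  }
}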
2024-12-08T05:51:10,789 INFO [StoreOpener-240bf35d9c17fdaa81416c54f511b572-1 {}] regionserver.HStore(327): Store=240bf35d9c17fdaa81416c54f511b572/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:10,789 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,790 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,791 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,791 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,791 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,793 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,797 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:10,797 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 240bf35d9c17fdaa81416c54f511b572; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692963, jitterRate=-0.11885298788547516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:51:10,797 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:10,798 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 240bf35d9c17fdaa81416c54f511b572: Running coprocessor pre-open hook at 1733637070785Writing region info on filesystem at 1733637070785Initializing all the Stores at 1733637070786 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637070786Cleaning up temporary data from 
old regions at 1733637070791 (+5 ms)Running coprocessor post-open hooks at 1733637070797 (+6 ms)Region opened successfully at 1733637070798 (+1 ms) 2024-12-08T05:51:10,800 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572., pid=6, masterSystemTime=1733637070779 2024-12-08T05:51:10,803 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,803 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:10,804 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=240bf35d9c17fdaa81416c54f511b572, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,43211,1733637069369 2024-12-08T05:51:10,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 240bf35d9c17fdaa81416c54f511b572, server=0d942cb2025d,43211,1733637069369 because future has completed 2024-12-08T05:51:10,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:51:10,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 240bf35d9c17fdaa81416c54f511b572, server=0d942cb2025d,43211,1733637069369 in 184 msec 2024-12-08T05:51:10,815 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:51:10,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=240bf35d9c17fdaa81416c54f511b572, ASSIGN in 344 msec 2024-12-08T05:51:10,817 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:51:10,817 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637070817"}]},"ts":"1733637070817"} 2024-12-08T05:51:10,820 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-08T05:51:10,821 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:51:10,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 405 msec 2024-12-08T05:51:11,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:11,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:12,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:12,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:13,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:13,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:14,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:14,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:15,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:15,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:15,799 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:51:15,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:15,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:51:15,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T05:51:15,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T05:51:15,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-08T05:51:15,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:15,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T05:51:15,834 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:51:15,835 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-08T05:51:16,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:16,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:17,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:17,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:18,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:18,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:19,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:19,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:20,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36481 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:20,517 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-08T05:51:20,517 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-08T05:51:20,521 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T05:51:20,521 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:20,525 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572., hostname=0d942cb2025d,43211,1733637069369, seqNum=2] 2024-12-08T05:51:20,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:20,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:21,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:21,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:22,528 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:22,529 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,529 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,529 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK], DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]) is bad. 2024-12-08T05:51:22,530 WARN [PacketResponder: BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32899] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,530 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK], DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]) is bad. 2024-12-08T05:51:22,530 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:39622 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39622 dst: /127.0.0.1:41681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:22,530 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,531 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK], DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32899,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]) is bad. 2024-12-08T05:51:22,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:48496 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48496 dst: /127.0.0.1:32899 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:48500 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48500 dst: /127.0.0.1:32899 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,531 WARN [PacketResponder: BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32899] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:39598 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39598 dst: /127.0.0.1:41681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1645738793_22 at /127.0.0.1:39566 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39566 dst: /127.0.0.1:41681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1645738793_22 at /127.0.0.1:48460 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48460 dst: /127.0.0.1:32899 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
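The WRITE_BLOCK failures above all bottom out in the same blocking read: the datanode waits in IOUtils.readFully for the next packet, and once the upstream writer disappears the read either returns -1 (surfacing as "Premature EOF from inputStream") or finds the channel already closed (ClosedChannelException). A minimal sketch of that read-fully pattern in Java; readFullyOrThrow is an illustrative name, not Hadoop's actual helper.

import java.io.IOException;
import java.io.InputStream;

final class ReadFullySketch {
    // Keep reading until len bytes have arrived; a peer that closes the socket
    // mid-packet makes read() return -1, which is reported as a premature EOF.
    static void readFullyOrThrow(InputStream in, byte[] buf, int off, int len) throws IOException {
        int done = 0;
        while (done < len) {
            int n = in.read(buf, off + done, len - done);
            if (n < 0) {
                throw new IOException("Premature EOF from inputStream");
            }
            done += n;
        }
    }
}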
2024-12-08T05:51:22,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2703b5c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:22,535 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@172ad5fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:22,535 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:22,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a74eb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:22,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@778fe5bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:22,538 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:22,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:51:22,538 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid 0f32638d-08d9-44b7-8a7c-68c5a3f38827) service to localhost/127.0.0.1:34501 2024-12-08T05:51:22,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:22,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data3/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:22,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data4/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:22,539 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:22,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:22,564 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:22,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:22,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:22,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:22,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dbae5f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:22,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371cfc09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:22,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bc0d2af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-35665-hadoop-hdfs-3_4_1-tests_jar-_-any-5443462856452099710/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:22,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d52def{HTTP/1.1, (http/1.1)}{localhost:35665} 2024-12-08T05:51:22,705 INFO [Time-limited test {}] server.Server(415): Started @162889ms 2024-12-08T05:51:22,707 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:22,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:22,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:22,734 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,734 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,734 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:22,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:38552 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38552 dst: /127.0.0.1:41681 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1645738793_22 at /127.0.0.1:38566 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38566 dst: /127.0.0.1:41681 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:22,734 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:38580 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38580 dst: /127.0.0.1:41681 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
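The recurring RecoverLeaseFSUtils(258) warnings earlier in this stretch are a side effect of reflection: isFileClosed is resolved and invoked through java.lang.reflect.Method, so when the DFSClient behind the filesystem has already been shut down, the real cause ("Filesystem closed") arrives wrapped in an InvocationTargetException, which is why the WARN prints InvocationTargetException: null first and the IOException only under "Caused by". A rough sketch of that probe, assuming a hypothetical helper rather than the actual RecoverLeaseFSUtils code:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
    // Returns true only when the filesystem reports the file as closed; any
    // failure of the probe itself (for example a closed client) is treated as
    // "not closed yet" and logged by the caller, matching the WARNs above.
    static boolean probeIsFileClosed(FileSystem fs, Path path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
            return false; // this FileSystem does not expose isFileClosed
        } catch (IllegalAccessException | InvocationTargetException e) {
            // e.getCause() carries the underlying IOException ("Filesystem closed")
            return false;
        }
    }
}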
2024-12-08T05:51:22,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@416b876f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:22,741 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25c86e24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:22,741 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:22,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ccc2c47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:22,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65985ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:22,743 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:22,743 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid b33389dd-32ed-4bbb-ad35-fee83d497013) service to localhost/127.0.0.1:34501 2024-12-08T05:51:22,743 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:51:22,743 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:22,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data1/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:22,746 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data2/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:22,746 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:22,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:22,762 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:22,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:22,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:22,765 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:22,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f3035e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:22,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38470c44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:22,825 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:22,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebcef7d6387c88d3 with lease ID 0xe044214c10bc1ab5: from storage DS-17a3c984-c607-4535-a9ef-b49c60ee4fab node DatanodeRegistration(127.0.0.1:35149, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=46409, infoSecurePort=0, ipcPort=40365, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:22,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebcef7d6387c88d3 with lease ID 0xe044214c10bc1ab5: from storage DS-2708d9c2-7d44-485b-a284-da2c11fc0327 node DatanodeRegistration(127.0.0.1:35149, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=46409, infoSecurePort=0, ipcPort=40365, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:22,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45aa5635{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-34867-hadoop-hdfs-3_4_1-tests_jar-_-any-11920750143562492756/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:22,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64084174{HTTP/1.1, (http/1.1)}{localhost:34867} 2024-12-08T05:51:22,998 INFO [Time-limited test {}] server.Server(415): Started @163182ms 2024-12-08T05:51:23,000 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:51:23,115 WARN [Thread-1365 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:23,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f25444b9c2ec0e with lease ID 0xe044214c10bc1ab6: from storage DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3 node DatanodeRegistration(127.0.0.1:36899, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=43415, infoSecurePort=0, ipcPort=42915, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:23,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f25444b9c2ec0e with lease ID 0xe044214c10bc1ab6: from storage DS-c0829470-c149-4e17-a594-a6372c390c46 node DatanodeRegistration(127.0.0.1:36899, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=43415, infoSecurePort=0, ipcPort=42915, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:23,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:23,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:24,026 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-08T05:51:24,029 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-08T05:51:24,031 ERROR [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
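The appendAndSync error just above is the client-side write pipeline giving up: the only datanode left in the pipeline for the old WAL block has been flagged bad (it was restarted), so there is no surviving replica to rebuild the pipeline from and the stream aborts. Conceptually, and purely as an illustration rather than the DataStreamer implementation:

import java.io.IOException;
import java.util.List;

final class PipelineRecoverySketch {
    // Drop the node flagged as bad; with no survivors the write cannot be
    // recovered, which is the "All datanodes ... are bad" abort logged above.
    static void handleBadNode(List<String> pipeline, int badIndex) throws IOException {
        String bad = pipeline.remove(badIndex);
        if (pipeline.isEmpty()) {
            throw new IOException("All datanodes [" + bad + "] are bad. Aborting...");
        }
        // otherwise: rebuild the pipeline from the remaining nodes and resend
    }
}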
2024-12-08T05:51:24,031 WARN [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:24,031 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C43211%2C1733637069369:(num 1733637069844) roll requested 2024-12-08T05:51:24,031 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:24,042 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 newFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:24,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:24,042 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:24,042 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:24,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:24,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:24,043 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:24,043 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
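Once the append has failed, the roller swaps in a fresh writer on a new block and only then closes the old one; writing the trailer to the dead pipeline fails, but that is tolerated because the new file is already accepting appends, which is the "Failed to write trailer, non-fatal" sequence above. A rough outline of that roll-on-failure idea, illustrative only and not AbstractFSWAL:

import java.io.IOException;
import java.util.function.Supplier;

final class RollOnFailureSketch {
    interface WalWriter {
        void append(byte[] entry) throws IOException;
        void close() throws IOException; // writes the trailer
    }

    private final Supplier<WalWriter> writerFactory;
    private WalWriter current;

    RollOnFailureSketch(Supplier<WalWriter> writerFactory) {
        this.writerFactory = writerFactory;
        this.current = writerFactory.get();
    }

    void appendWithRoll(byte[] entry) throws IOException {
        try {
            current.append(entry);
        } catch (IOException appendFailed) {
            WalWriter old = current;
            current = writerFactory.get();  // roll: new file, new pipeline
            try {
                old.close();                // may fail on the dead pipeline
            } catch (IOException nonFatal) {
                // tolerated: "Failed to write trailer, non-fatal, continuing..."
            }
            current.append(entry);          // retried here for illustration only
        }
    }
}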
2024-12-08T05:51:24,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:24,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:24,044 WARN [IPC Server handler 3 on default port 34501 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-08T05:51:24,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 after 1ms 2024-12-08T05:51:24,047 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43415:43415),(127.0.0.1/127.0.0.1:46409:46409)] 2024-12-08T05:51:24,047 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 is not closed yet, will try archiving it next time 2024-12-08T05:51:24,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:24,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:25,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:25,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:26,051 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-08T05:51:26,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:26,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:26,828 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T05:51:27,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:27,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:28,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 after 4001ms 2024-12-08T05:51:28,054 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:28,055 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36899,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK], DatanodeInfoWithStorage[127.0.0.1:35149,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36899,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]) is bad. 
2024-12-08T05:51:28,055 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:35212 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35212 dst: /127.0.0.1:36899 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:28,055 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:50424 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50424 dst: /127.0.0.1:35149 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:28,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45aa5635{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:28,058 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64084174{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:28,058 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:28,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38470c44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:28,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f3035e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:28,059 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:28,059 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:28,059 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid b33389dd-32ed-4bbb-ad35-fee83d497013) service to localhost/127.0.0.1:34501 2024-12-08T05:51:28,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:28,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data1/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:28,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data2/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:28,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:28,068 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:28,072 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:28,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:28,075 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:28,075 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:28,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@190c30f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:28,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dea1853{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:28,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67eaa136{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-44271-hadoop-hdfs-3_4_1-tests_jar-_-any-10783734842707182785/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:28,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a2d885f{HTTP/1.1, 
(http/1.1)}{localhost:44271} 2024-12-08T05:51:28,208 INFO [Time-limited test {}] server.Server(415): Started @168392ms 2024-12-08T05:51:28,209 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:28,238 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:28,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1925450250_22 at /127.0.0.1:50442 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50442 dst: /127.0.0.1:35149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:28,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bc0d2af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:28,247 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d52def{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:28,247 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:28,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371cfc09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:28,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dbae5f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:28,251 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:28,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-08T05:51:28,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:28,251 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid 0f32638d-08d9-44b7-8a7c-68c5a3f38827) service to localhost/127.0.0.1:34501 2024-12-08T05:51:28,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data3/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:28,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data4/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:28,252 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:28,265 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:28,269 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:28,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:28,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:28,270 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:51:28,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38da3df6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:28,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bc4f0c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:28,313 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67416964df1b7efc with lease ID 0xe044214c10bc1ab7: from storage DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3 node DatanodeRegistration(127.0.0.1:34007, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=36737, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67416964df1b7efc with lease ID 0xe044214c10bc1ab7: from storage DS-c0829470-c149-4e17-a594-a6372c390c46 node DatanodeRegistration(127.0.0.1:34007, datanodeUuid=b33389dd-32ed-4bbb-ad35-fee83d497013, infoPort=36737, infoSecurePort=0, ipcPort=42173, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:28,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a43388a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/java.io.tmpdir/jetty-localhost-35835-hadoop-hdfs-3_4_1-tests_jar-_-any-16261382753890524848/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:28,385 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e90a91d{HTTP/1.1, (http/1.1)}{localhost:35835} 2024-12-08T05:51:28,385 INFO [Time-limited test {}] server.Server(415): Started @168569ms 2024-12-08T05:51:28,387 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:51:28,483 WARN [Thread-1439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c6c77b987cc9d31 with lease ID 0xe044214c10bc1ab8: from storage DS-17a3c984-c607-4535-a9ef-b49c60ee4fab node DatanodeRegistration(127.0.0.1:32791, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=39953, infoSecurePort=0, ipcPort=44859, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:28,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c6c77b987cc9d31 with lease ID 0xe044214c10bc1ab8: from storage DS-2708d9c2-7d44-485b-a284-da2c11fc0327 node DatanodeRegistration(127.0.0.1:32791, datanodeUuid=0f32638d-08d9-44b7-8a7c-68c5a3f38827, infoPort=39953, infoSecurePort=0, ipcPort=44859, storageInfo=lv=-57;cid=testClusterID;nsid=831063879;c=1733637068326), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:28,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:28,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:29,405 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-08T05:51:29,407 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-08T05:51:29,408 ERROR [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35149,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:29,409 WARN [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35149,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:29,409 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C43211%2C1733637069369:(num 1733637084031) roll requested 2024-12-08T05:51:29,409 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:29,414 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 newFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:29,415 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:29,415 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:29,415 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:29,415 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:29,415 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:29,415 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:29,415 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35149,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:29,416 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35149,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:29,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:29,416 WARN [IPC Server handler 0 on default port 34501 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-08T05:51:29,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 after 0ms 2024-12-08T05:51:29,417 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39953:39953),(127.0.0.1/127.0.0.1:36737:36737)] 2024-12-08T05:51:29,417 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 is not closed yet, will try archiving it next time 2024-12-08T05:51:29,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:29,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:30,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:30,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:31,419 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:31,425 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 newFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:31,425 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:31,425 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:31,425 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:31,425 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:31,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:31,426 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:31,426 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36737:36737),(127.0.0.1/127.0.0.1:39953:39953)] 2024-12-08T05:51:31,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 is not closed yet, will try archiving it next time 2024-12-08T05:51:31,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 is not closed yet, will try archiving it next time 2024-12-08T05:51:31,427 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:31,427 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:31,427 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 after 0ms 2024-12-08T05:51:31,427 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:31,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741838_1019 (size=1264) 2024-12-08T05:51:31,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741838_1019 (size=1264) 2024-12-08T05:51:31,428 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 is not closed yet, will try archiving it next time 2024-12-08T05:51:31,438 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733637070798/Put/vlen=218/seqid=0] 2024-12-08T05:51:31,438 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733637080526/Put/vlen=1045/seqid=0] 2024-12-08T05:51:31,438 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637069844 2024-12-08T05:51:31,438 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:31,438 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:31,438 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 after 0ms 2024-12-08T05:51:31,439 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:31,442 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733637084030/Put/vlen=1045/seqid=0] 2024-12-08T05:51:31,442 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733637086053/Put/vlen=1045/seqid=0] 
2024-12-08T05:51:31,442 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 2024-12-08T05:51:31,442 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:31,442 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:31,442 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 after 0ms 2024-12-08T05:51:31,442 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637089409 2024-12-08T05:51:31,445 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733637089408/Put/vlen=1045/seqid=0] 2024-12-08T05:51:31,445 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:31,445 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:31,446 WARN [IPC Server handler 4 on default port 34501 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-08T05:51:31,446 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 after 1ms 2024-12-08T05:51:31,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:31,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:32,316 WARN [ResponseProcessor for block BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:32,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1645738793_22 at /127.0.0.1:41004 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41004 dst: /127.0.0.1:34007 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34007 remote=/127.0.0.1:41004]. Total timeout mills is 60000, 59109 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:51:32,316 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 block BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34007,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK], DatanodeInfoWithStorage[127.0.0.1:32791,DS-17a3c984-c607-4535-a9ef-b49c60ee4fab,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34007,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]) is bad. 2024-12-08T05:51:32,316 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1645738793_22 at /127.0.0.1:57460 [Receiving block BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32791:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57460 dst: /127.0.0.1:32791 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:51:32,317 WARN [DataStreamer for file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 block BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741839_1022 (size=85) 2024-12-08T05:51:32,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741839_1022 (size=85) 2024-12-08T05:51:32,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:32,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:33,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-08T05:51:33,418 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637084031 after 4001ms 2024-12-08T05:51:33,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:33,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:34,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:34,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:35,447 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 after 4001ms 2024-12-08T05:51:35,447 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:35,451 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:35,451 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 240bf35d9c17fdaa81416c54f511b572 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-08T05:51:35,451 ERROR [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,452 WARN [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,452 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C43211%2C1733637069369:(num 1733637091419) roll requested 2024-12-08T05:51:35,452 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.1733637095452 2024-12-08T05:51:35,457 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 newFile=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637095452 2024-12-08T05:51:35,457 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,457 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,458 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,458 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,458 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,458 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637095452 2024-12-08T05:51:35,458 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,458 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1308812349-172.17.0.2-1733637068326:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
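Editor's note on the repeated $Proxy46/$Proxy47 + GeneratedMethodAccessor + Method.invoke frames above: they come from layered java.lang.reflect.Proxy invocation handlers (the retry handler and HBase's HFileSystem interceptor) stacked around the HDFS ClientProtocol, so the same dispatch frames appear once per layer. The following is a minimal, self-contained sketch of that layering effect only; the interface and class names are hypothetical, not the actual HDFS/HBase types or wiring.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

// Hypothetical stand-in for the single ClientProtocol method seen in the trace above.
interface NamenodeClient {
  String updateBlockForPipeline(String block);
}

public class LayeredProxyDemo {

  // Wraps an instance in one more reflective proxy layer, analogous in shape to
  // RetryInvocationHandler and HFileSystem's interceptor in the stack trace.
  static NamenodeClient wrap(final NamenodeClient inner) {
    InvocationHandler handler = (proxy, method, args) -> {
      // Each layer re-dispatches through Method.invoke, so the
      // $ProxyNN / GeneratedMethodAccessor / Method.invoke frames repeat once per layer.
      return method.invoke(inner, args);
    };
    return (NamenodeClient) Proxy.newProxyInstance(
        NamenodeClient.class.getClassLoader(),
        new Class<?>[] { NamenodeClient.class },
        handler);
  }

  public static void main(String[] args) {
    NamenodeClient base = block -> "new generation stamp for " + block;
    NamenodeClient client = base;
    for (int i = 0; i < 3; i++) { // three layers -> three repeated dispatch sequences in a stack trace
      client = wrap(client);
    }
    System.out.println(client.updateBlockForPipeline("blk_1073741839_1021"));
  }
}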
2024-12-08T05:51:35,459 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:35,459 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 after 0ms 2024-12-08T05:51:35,459 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36737:36737),(127.0.0.1/127.0.0.1:39953:39953)] 2024-12-08T05:51:35,459 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.1733637091419 to hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs/0d942cb2025d%2C43211%2C1733637069369.1733637091419 2024-12-08T05:51:35,475 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/.tmp/info/3a23b6da2ce440d5b07bd1f63c83f5fe is 1080, key is row1002/info:/1733637080526/Put/seqid=0 2024-12-08T05:51:35,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741841_1024 (size=9270) 2024-12-08T05:51:35,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741841_1024 (size=9270) 2024-12-08T05:51:35,480 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/.tmp/info/3a23b6da2ce440d5b07bd1f63c83f5fe 2024-12-08T05:51:35,487 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/.tmp/info/3a23b6da2ce440d5b07bd1f63c83f5fe as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/info/3a23b6da2ce440d5b07bd1f63c83f5fe 2024-12-08T05:51:35,493 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/info/3a23b6da2ce440d5b07bd1f63c83f5fe, entries=4, sequenceid=8, filesize=9.1 K 2024-12-08T05:51:35,494 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 240bf35d9c17fdaa81416c54f511b572 in 43ms, sequenceid=8, compaction requested=false 2024-12-08T05:51:35,494 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
240bf35d9c17fdaa81416c54f511b572: 2024-12-08T05:51:35,494 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-08T05:51:35,495 ERROR [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,495 WARN [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4-prefix:0d942cb2025d,43211,1733637069369.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,495 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C43211%2C1733637069369.meta:.meta(num 1733637070276) roll requested 2024-12-08T05:51:35,495 INFO [regionserver/0d942cb2025d:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C43211%2C1733637069369.meta.1733637095495.meta 2024-12-08T05:51:35,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,500 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637095495.meta 2024-12-08T05:51:35,500 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,500 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:35,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta 2024-12-08T05:51:35,501 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36737:36737),(127.0.0.1/127.0.0.1:39953:39953)] 2024-12-08T05:51:35,501 DEBUG [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta is not closed yet, will try archiving it next time 2024-12-08T05:51:35,501 WARN [IPC Server handler 3 on default port 34501 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014 2024-12-08T05:51:35,501 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta after 1ms 2024-12-08T05:51:35,515 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/info/9f2236f27c114d7183843c2e2a1f344a is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572./info:regioninfo/1733637070804/Put/seqid=0 2024-12-08T05:51:35,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741843_1027 (size=7125) 2024-12-08T05:51:35,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741843_1027 (size=7125) 2024-12-08T05:51:35,521 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/info/9f2236f27c114d7183843c2e2a1f344a 2024-12-08T05:51:35,539 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/ns/2384f9672a3d4525827f25f4548f3b69 is 43, key is default/ns:d/1733637070339/Put/seqid=0 2024-12-08T05:51:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741844_1028 (size=5153) 2024-12-08T05:51:35,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741844_1028 (size=5153) 2024-12-08T05:51:35,544 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/ns/2384f9672a3d4525827f25f4548f3b69 2024-12-08T05:51:35,562 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/table/56c99be0d7d64a99a46ea0460939f88d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733637070817/Put/seqid=0 2024-12-08T05:51:35,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741845_1029 (size=5438) 2024-12-08T05:51:35,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741845_1029 (size=5438) 2024-12-08T05:51:35,567 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/table/56c99be0d7d64a99a46ea0460939f88d 2024-12-08T05:51:35,573 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/info/9f2236f27c114d7183843c2e2a1f344a as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/info/9f2236f27c114d7183843c2e2a1f344a 2024-12-08T05:51:35,578 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/info/9f2236f27c114d7183843c2e2a1f344a, entries=10, sequenceid=11, filesize=7.0 K 2024-12-08T05:51:35,578 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/ns/2384f9672a3d4525827f25f4548f3b69 as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/ns/2384f9672a3d4525827f25f4548f3b69 2024-12-08T05:51:35,583 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/ns/2384f9672a3d4525827f25f4548f3b69, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:51:35,584 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/.tmp/table/56c99be0d7d64a99a46ea0460939f88d as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/table/56c99be0d7d64a99a46ea0460939f88d 2024-12-08T05:51:35,589 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/table/56c99be0d7d64a99a46ea0460939f88d, entries=2, sequenceid=11, filesize=5.3 K 2024-12-08T05:51:35,590 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 96ms, sequenceid=11, compaction requested=false 2024-12-08T05:51:35,590 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T05:51:35,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:51:35,596 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:51:35,596 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:35,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:35,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:35,596 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-08T05:51:35,596 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:51:35,596 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1010355893, stopped=false 2024-12-08T05:51:35,596 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,36481,1733637069282 2024-12-08T05:51:35,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:35,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:35,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:35,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:35,598 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:35,598 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:51:35,598 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:35,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:35,599 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,43211,1733637069369' ***** 2024-12-08T05:51:35,599 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:51:35,599 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:51:35,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:35,599 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:51:35,599 INFO [RS:0;0d942cb2025d:43211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:51:35,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:35,599 INFO [RS:0;0d942cb2025d:43211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:51:35,599 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(3091): Received CLOSE for 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,43211,1733637069369 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:43211. 
2024-12-08T05:51:35,600 DEBUG [RS:0;0d942cb2025d:43211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:51:35,600 DEBUG [RS:0;0d942cb2025d:43211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:35,600 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 240bf35d9c17fdaa81416c54f511b572, disabling compactions & flushes 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:51:35,600 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:51:35,600 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:51:35,600 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. after waiting 0 ms 2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:51:35,600 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 
2024-12-08T05:51:35,600 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:51:35,601 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1325): Online Regions={240bf35d9c17fdaa81416c54f511b572=TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:51:35,601 DEBUG [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 240bf35d9c17fdaa81416c54f511b572 2024-12-08T05:51:35,601 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:35,601 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:35,601 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:35,601 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:35,601 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:35,605 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T05:51:35,605 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/data/default/TestLogRolling-testLogRollOnPipelineRestart/240bf35d9c17fdaa81416c54f511b572/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-08T05:51:35,606 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:35,606 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:35,606 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 
2024-12-08T05:51:35,606 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 240bf35d9c17fdaa81416c54f511b572: Waiting for close lock at 1733637095600Running coprocessor pre-close hooks at 1733637095600Disabling compacts and flushes for region at 1733637095600Disabling writes for close at 1733637095600Writing region close event to WAL at 1733637095601 (+1 ms)Running coprocessor post-close hooks at 1733637095606 (+5 ms)Closed at 1733637095606 2024-12-08T05:51:35,606 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637095601Running coprocessor pre-close hooks at 1733637095601Disabling compacts and flushes for region at 1733637095601Disabling writes for close at 1733637095601Writing region close event to WAL at 1733637095602 (+1 ms)Running coprocessor post-close hooks at 1733637095606 (+4 ms)Closed at 1733637095606 2024-12-08T05:51:35,606 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:35,606 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733637070415.240bf35d9c17fdaa81416c54f511b572. 2024-12-08T05:51:35,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:35,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:51:35,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-08T05:51:35,687 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T05:51:35,687 INFO [regionserver/0d942cb2025d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T05:51:35,696 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:35,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:35,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:35,801 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,43211,1733637069369; all regions closed. 2024-12-08T05:51:35,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741842_1025 (size=825) 2024-12-08T05:51:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741842_1025 (size=825) 2024-12-08T05:51:36,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:36,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:37,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:37,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:38,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:38,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:39,261 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T05:51:39,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-08T05:51:39,502 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta after 4002ms 2024-12-08T05:51:39,502 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/WALs/0d942cb2025d,43211,1733637069369/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta to hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs/0d942cb2025d%2C43211%2C1733637069369.meta.1733637070276.meta 2024-12-08T05:51:39,505 DEBUG [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs 2024-12-08T05:51:39,505 INFO [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C43211%2C1733637069369.meta:.meta(num 1733637095495) 2024-12-08T05:51:39,506 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,506 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,506 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,506 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,506 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741840_1023 (size=1162) 2024-12-08T05:51:39,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741840_1023 (size=1162) 2024-12-08T05:51:39,512 DEBUG [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs 2024-12-08T05:51:39,512 INFO [RS:0;0d942cb2025d:43211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C43211%2C1733637069369:(num 1733637095452) 2024-12-08T05:51:39,512 DEBUG [RS:0;0d942cb2025d:43211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:39,512 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:51:39,513 INFO [RS:0;0d942cb2025d:43211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:39,513 INFO [RS:0;0d942cb2025d:43211 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:39,513 INFO [RS:0;0d942cb2025d:43211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:39,513 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:51:39,513 INFO [RS:0;0d942cb2025d:43211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43211 2024-12-08T05:51:39,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,43211,1733637069369 2024-12-08T05:51:39,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:39,515 INFO [RS:0;0d942cb2025d:43211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:39,517 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,43211,1733637069369] 2024-12-08T05:51:39,521 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,43211,1733637069369 already deleted, retry=false 2024-12-08T05:51:39,521 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,43211,1733637069369 expired; onlineServers=0 2024-12-08T05:51:39,521 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,36481,1733637069282' ***** 2024-12-08T05:51:39,521 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:51:39,521 INFO [M:0;0d942cb2025d:36481 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:51:39,521 INFO [M:0;0d942cb2025d:36481 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:51:39,521 DEBUG [M:0;0d942cb2025d:36481 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:51:39,521 DEBUG [M:0;0d942cb2025d:36481 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:51:39,521 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
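The shutdown sequence above is driven by ZooKeeper: when the region server closes its session, its ephemeral znode under /hbase/rs is deleted, the watchers receive the NodeDeleted and NodeChildrenChanged events logged here, and RegionServerTracker processes the expiration. A minimal sketch of watching such an ephemeral node with the plain ZooKeeper client follows; the quorum string and znode path are copied from this log, while the class, session timeout and handling code are illustrative assumptions.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch of watching a region server's ephemeral znode; quorum and path are
// taken from this log, everything else is illustrative.
public final class RsZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61807", 30_000, event -> { });
    String znode = "/hbase/rs/0d942cb2025d,43211,1733637069369";
    Watcher onChange = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        // A tracker such as RegionServerTracker would treat this as server expiration.
        System.out.println("Ephemeral node deleted: " + event.getPath());
      }
    };
    if (zk.exists(znode, onChange) == null) {
      System.out.println("Node already gone: " + znode);
    }
    Thread.sleep(60_000);   // keep the session open long enough to observe the event
    zk.close();
  }
}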
2024-12-08T05:51:39,521 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637069618 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637069618,5,FailOnTimeoutGroup] 2024-12-08T05:51:39,521 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637069618 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637069618,5,FailOnTimeoutGroup] 2024-12-08T05:51:39,521 INFO [M:0;0d942cb2025d:36481 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:51:39,522 INFO [M:0;0d942cb2025d:36481 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:51:39,522 DEBUG [M:0;0d942cb2025d:36481 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:51:39,522 INFO [M:0;0d942cb2025d:36481 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:51:39,522 INFO [M:0;0d942cb2025d:36481 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:51:39,522 INFO [M:0;0d942cb2025d:36481 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:51:39,522 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:51:39,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:39,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:39,523 DEBUG [M:0;0d942cb2025d:36481 {}] zookeeper.ZKUtil(347): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:51:39,523 WARN [M:0;0d942cb2025d:36481 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:51:39,524 INFO [M:0;0d942cb2025d:36481 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/.lastflushedseqids 2024-12-08T05:51:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741846_1030 (size=130) 2024-12-08T05:51:39,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741846_1030 (size=130) 2024-12-08T05:51:39,529 INFO [M:0;0d942cb2025d:36481 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:51:39,529 INFO [M:0;0d942cb2025d:36481 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:51:39,530 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:39,530 INFO [M:0;0d942cb2025d:36481 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:39,530 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:39,530 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:39,530 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:39,530 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-08T05:51:39,530 ERROR [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData-prefix:0d942cb2025d,36481,1733637069282 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:39,530 WARN [FSHLog-0-hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData-prefix:0d942cb2025d,36481,1733637069282 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-08T05:51:39,531 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 0d942cb2025d%2C36481%2C1733637069282:(num 1733637069481) roll requested 2024-12-08T05:51:39,531 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C36481%2C1733637069282.1733637099531 2024-12-08T05:51:39,535 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,535 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,535 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,536 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,536 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,536 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637099531 2024-12-08T05:51:39,536 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-08T05:51:39,536 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41681,DS-95d0f3e1-c73e-44c0-bdc2-8a5d260db9d3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
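The roll above is triggered internally by the failed append ("All datanodes ... are bad"): the roller creates a new writer, the old writer fails to close cleanly, and its lease is then recovered in the entries that follow. For reference, a log roll on a live region server can also be requested explicitly through the client Admin API; the sketch below assumes a reachable cluster, the server name string is copied from this log, and the connection setup is generic boilerplate rather than anything the test itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: explicitly request a WAL roll on one region server via the Admin API.
// The server name string is copied from this log; the connection setup is generic.
public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ServerName server = ServerName.valueOf("0d942cb2025d,43211,1733637069369");
      admin.rollWALWriter(server);   // asks that server to roll its WAL writer
    }
  }
}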
2024-12-08T05:51:39,536 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 2024-12-08T05:51:39,536 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39953:39953),(127.0.0.1/127.0.0.1:36737:36737)] 2024-12-08T05:51:39,537 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 is not closed yet, will try archiving it next time 2024-12-08T05:51:39,537 WARN [IPC Server handler 4 on default port 34501 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-08T05:51:39,537 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 after 1ms 2024-12-08T05:51:39,552 DEBUG [M:0;0d942cb2025d:36481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fe812d192444b6f8bb044cc4c848604 is 82, key is hbase:meta,,1/info:regioninfo/1733637070321/Put/seqid=0 2024-12-08T05:51:39,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741848_1033 (size=5672) 2024-12-08T05:51:39,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741848_1033 (size=5672) 2024-12-08T05:51:39,557 INFO [M:0;0d942cb2025d:36481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fe812d192444b6f8bb044cc4c848604 2024-12-08T05:51:39,576 DEBUG [M:0;0d942cb2025d:36481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d61d1463b6e04352a23688cce1332d56 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637070823/Put/seqid=0 2024-12-08T05:51:39,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741849_1034 (size=6118) 2024-12-08T05:51:39,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741849_1034 (size=6118) 2024-12-08T05:51:39,581 INFO [M:0;0d942cb2025d:36481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d61d1463b6e04352a23688cce1332d56 2024-12-08T05:51:39,599 DEBUG [M:0;0d942cb2025d:36481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc57aef6dbd74ac38947e8f1a271b09b is 69, key is 0d942cb2025d,43211,1733637069369/rs:state/1733637069641/Put/seqid=0 2024-12-08T05:51:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741850_1035 (size=5156) 2024-12-08T05:51:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741850_1035 (size=5156) 2024-12-08T05:51:39,604 INFO [M:0;0d942cb2025d:36481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc57aef6dbd74ac38947e8f1a271b09b 2024-12-08T05:51:39,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:39,617 INFO [RS:0;0d942cb2025d:43211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:39,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43211-0x10190a0445e0001, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:39,617 INFO [RS:0;0d942cb2025d:43211 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,43211,1733637069369; zookeeper connection closed. 
2024-12-08T05:51:39,617 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@77a120c5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@77a120c5 2024-12-08T05:51:39,617 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:51:39,622 DEBUG [M:0;0d942cb2025d:36481 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2fd7bdf4e1f94cffbcf3860628c14521 is 52, key is load_balancer_on/state:d/1733637070409/Put/seqid=0 2024-12-08T05:51:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741851_1036 (size=5056) 2024-12-08T05:51:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741851_1036 (size=5056) 2024-12-08T05:51:39,627 INFO [M:0;0d942cb2025d:36481 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2fd7bdf4e1f94cffbcf3860628c14521 2024-12-08T05:51:39,632 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8fe812d192444b6f8bb044cc4c848604 as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8fe812d192444b6f8bb044cc4c848604 2024-12-08T05:51:39,636 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8fe812d192444b6f8bb044cc4c848604, entries=8, sequenceid=56, filesize=5.5 K 2024-12-08T05:51:39,637 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d61d1463b6e04352a23688cce1332d56 as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d61d1463b6e04352a23688cce1332d56 2024-12-08T05:51:39,642 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d61d1463b6e04352a23688cce1332d56, entries=6, sequenceid=56, filesize=6.0 K 2024-12-08T05:51:39,643 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bc57aef6dbd74ac38947e8f1a271b09b as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bc57aef6dbd74ac38947e8f1a271b09b 
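Each "Flushed memstore ... to=.../.tmp/..." entry followed by "Committing .../.tmp/<family>/<file> as .../<family>/<file>" reflects the usual two-step flush: the file is first written under the region's .tmp directory and only then moved into the column-family directory, so readers never observe a partially written store file. A small sketch of that write-then-rename idiom using the plain Hadoop FileSystem API follows; the method, directory layout and payload are placeholders, and the real code writes HFiles through HRegionFileSystem rather than raw bytes.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-into-.tmp-then-rename idiom behind the "Committing ... as ..." entries.
// Method, directory layout and payload are illustrative placeholders.
public final class TmpCommitSketch {
  public static Path writeAndCommit(FileSystem fs, Path regionDir, String family,
      String fileName, byte[] payload) throws IOException {
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
    Path finalFile = new Path(new Path(regionDir, family), fileName);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);                          // stand-in for writing a real HFile
    }
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {          // the commit step: move the finished file into place
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }
}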
2024-12-08T05:51:39,647 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bc57aef6dbd74ac38947e8f1a271b09b, entries=1, sequenceid=56, filesize=5.0 K 2024-12-08T05:51:39,647 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2fd7bdf4e1f94cffbcf3860628c14521 as hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2fd7bdf4e1f94cffbcf3860628c14521 2024-12-08T05:51:39,652 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2fd7bdf4e1f94cffbcf3860628c14521, entries=1, sequenceid=56, filesize=4.9 K 2024-12-08T05:51:39,653 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=56, compaction requested=false 2024-12-08T05:51:39,655 INFO [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:39,655 DEBUG [M:0;0d942cb2025d:36481 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637099530Disabling compacts and flushes for region at 1733637099530Disabling writes for close at 1733637099530Obtaining lock to block concurrent updates at 1733637099530Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637099530Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733637099530Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733637099537 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637099537Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637099552 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637099552Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637099562 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637099575 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637099575Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637099585 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637099598 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637099598Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637099608 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637099622 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637099622Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bd480fc: reopening flushed file at 1733637099631 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31f906fa: reopening flushed file at 1733637099636 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d8b8520: reopening flushed file at 1733637099642 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68f83fb: reopening flushed file at 1733637099647 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=56, compaction requested=false at 1733637099653 (+6 ms)Writing region close event to WAL at 1733637099654 (+1 ms)Closed at 1733637099654 2024-12-08T05:51:39,655 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,655 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,655 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,655 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,655 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:51:39,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34007 is added to blk_1073741847_1031 (size=757) 2024-12-08T05:51:39,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32791 is added to blk_1073741847_1031 (size=757) 2024-12-08T05:51:39,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:39,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:40,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:40,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:40,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:41,132 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:51:41,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:41,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:41,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:42,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-08T05:51:42,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:42,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:43,538 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 after 4002ms 2024-12-08T05:51:43,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/WALs/0d942cb2025d,36481,1733637069282/0d942cb2025d%2C36481%2C1733637069282.1733637069481 to hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/oldWALs/0d942cb2025d%2C36481%2C1733637069282.1733637069481 2024-12-08T05:51:43,541 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/MasterData/oldWALs/0d942cb2025d%2C36481%2C1733637069282.1733637069481 to hdfs://localhost:34501/user/jenkins/test-data/2cbf53d1-1f17-8024-a89d-bd9a6e87b4a4/oldWALs/0d942cb2025d%2C36481%2C1733637069282.1733637069481$masterlocalwal$ 2024-12-08T05:51:43,541 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:51:43,541 INFO [M:0;0d942cb2025d:36481 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-08T05:51:43,542 INFO [M:0;0d942cb2025d:36481 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36481 2024-12-08T05:51:43,542 INFO [M:0;0d942cb2025d:36481 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:51:43,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:43,644 INFO [M:0;0d942cb2025d:36481 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:51:43,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36481-0x10190a0445e0000, quorum=127.0.0.1:61807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:51:43,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a43388a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:43,646 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e90a91d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:43,646 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:43,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bc4f0c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:43,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38da3df6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:43,648 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:43,648 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:43,648 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:43,648 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid 0f32638d-08d9-44b7-8a7c-68c5a3f38827) service to localhost/127.0.0.1:34501 2024-12-08T05:51:43,648 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data3/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:43,649 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data4/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:43,649 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:43,651 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67eaa136{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:43,651 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a2d885f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:43,651 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:43,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dea1853{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:43,652 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@190c30f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:43,653 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:51:43,653 WARN [BP-1308812349-172.17.0.2-1733637068326 heartbeating to localhost/127.0.0.1:34501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1308812349-172.17.0.2-1733637068326 (Datanode Uuid b33389dd-32ed-4bbb-ad35-fee83d497013) service to localhost/127.0.0.1:34501 2024-12-08T05:51:43,653 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:51:43,653 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:51:43,653 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data1/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:43,654 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/cluster_8a42907d-6224-13e1-832f-048855039e98/data/data2/current/BP-1308812349-172.17.0.2-1733637068326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:51:43,654 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:51:43,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b056d01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:43,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24dc4202{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:51:43,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:51:43,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f6eeaf0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:51:43,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ad19754{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir/,STOPPED} 2024-12-08T05:51:43,666 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:51:43,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:51:43,690 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34501 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34501 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34501 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34501 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34501 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=133 (was 158), ProcessCount=11 (was 11), AvailableMemoryMB=7789 (was 7433) - AvailableMemoryMB LEAK? 
- 2024-12-08T05:51:43,697 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=133, ProcessCount=11, AvailableMemoryMB=7790 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.log.dir so I do NOT create it in target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f73842bd-509c-ac2e-22fb-16b6ad6bb8f8/hadoop.tmp.dir so I do NOT create it in target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724, deleteOnExit=true 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/test.cache.data in system properties and HBase conf 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:51:43,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:51:43,698 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:51:43,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:51:43,712 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:51:43,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:43,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:43,786 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:43,790 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:43,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:43,791 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:43,791 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:43,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:43,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7255e0a2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:43,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3eb3d465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:43,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45c003ed{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/java.io.tmpdir/jetty-localhost-37175-hadoop-hdfs-3_4_1-tests_jar-_-any-8751419839479415218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:51:43,907 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ff1ded9{HTTP/1.1, (http/1.1)}{localhost:37175} 2024-12-08T05:51:43,907 INFO [Time-limited test {}] server.Server(415): Started @184091ms 2024-12-08T05:51:43,920 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:51:43,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:43,980 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:43,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:43,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:43,981 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:43,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b7dc08e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:43,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f86104c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:44,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f519892{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/java.io.tmpdir/jetty-localhost-38515-hadoop-hdfs-3_4_1-tests_jar-_-any-10863318369577768621/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:44,096 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a68ce5b{HTTP/1.1, (http/1.1)}{localhost:38515} 2024-12-08T05:51:44,096 INFO [Time-limited test {}] server.Server(415): Started @184280ms 2024-12-08T05:51:44,098 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:51:44,125 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:51:44,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:51:44,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:51:44,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:51:44,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:51:44,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f3146c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:51:44,130 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@429ea7f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:51:44,195 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data2/current/BP-1825605769-172.17.0.2-1733637103728/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:44,195 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data1/current/BP-1825605769-172.17.0.2-1733637103728/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:44,211 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:51:44,214 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ab12839c01ff764 with lease ID 0xe092fb99dedcdea4: Processing first storage report for DS-89bebb0d-8be1-4a82-ac01-3b60d47cee83 from datanode DatanodeRegistration(127.0.0.1:39511, datanodeUuid=6895a8d4-eb37-4f43-80e9-0d2b5d47378c, infoPort=36267, infoSecurePort=0, ipcPort=39927, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728) 2024-12-08T05:51:44,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ab12839c01ff764 with lease ID 0xe092fb99dedcdea4: from storage DS-89bebb0d-8be1-4a82-ac01-3b60d47cee83 node DatanodeRegistration(127.0.0.1:39511, datanodeUuid=6895a8d4-eb37-4f43-80e9-0d2b5d47378c, infoPort=36267, infoSecurePort=0, ipcPort=39927, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:44,214 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3ab12839c01ff764 with lease ID 0xe092fb99dedcdea4: Processing first storage report for DS-984c5df6-11b2-4000-8df6-668b2bcc57b9 from datanode DatanodeRegistration(127.0.0.1:39511, datanodeUuid=6895a8d4-eb37-4f43-80e9-0d2b5d47378c, infoPort=36267, infoSecurePort=0, ipcPort=39927, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728) 2024-12-08T05:51:44,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ab12839c01ff764 with lease ID 0xe092fb99dedcdea4: from storage DS-984c5df6-11b2-4000-8df6-668b2bcc57b9 node DatanodeRegistration(127.0.0.1:39511, datanodeUuid=6895a8d4-eb37-4f43-80e9-0d2b5d47378c, infoPort=36267, infoSecurePort=0, ipcPort=39927, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:44,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75e53117{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/java.io.tmpdir/jetty-localhost-36793-hadoop-hdfs-3_4_1-tests_jar-_-any-13863400320782858031/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:51:44,247 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c9d71b0{HTTP/1.1, (http/1.1)}{localhost:36793} 2024-12-08T05:51:44,247 INFO [Time-limited test {}] server.Server(415): Started @184431ms 2024-12-08T05:51:44,248 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-08T05:51:44,337 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data3/current/BP-1825605769-172.17.0.2-1733637103728/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:44,337 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data4/current/BP-1825605769-172.17.0.2-1733637103728/current, will proceed with Du for space computation calculation, 2024-12-08T05:51:44,353 WARN [Thread-1648 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:51:44,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a2baf3e0ce0d85a with lease ID 0xe092fb99dedcdea5: Processing first storage report for DS-e7935565-53cb-41f2-9bdf-f89f33c40daa from datanode DatanodeRegistration(127.0.0.1:33893, datanodeUuid=bb808f8c-e084-41fe-a6ad-81f5317f75fc, infoPort=38183, infoSecurePort=0, ipcPort=45943, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728) 2024-12-08T05:51:44,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a2baf3e0ce0d85a with lease ID 0xe092fb99dedcdea5: from storage DS-e7935565-53cb-41f2-9bdf-f89f33c40daa node DatanodeRegistration(127.0.0.1:33893, datanodeUuid=bb808f8c-e084-41fe-a6ad-81f5317f75fc, infoPort=38183, infoSecurePort=0, ipcPort=45943, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T05:51:44,356 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a2baf3e0ce0d85a with lease ID 0xe092fb99dedcdea5: Processing first storage report for DS-b625cfaa-adbb-4f96-aa2f-d0814d3442db from datanode DatanodeRegistration(127.0.0.1:33893, datanodeUuid=bb808f8c-e084-41fe-a6ad-81f5317f75fc, infoPort=38183, infoSecurePort=0, ipcPort=45943, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728) 2024-12-08T05:51:44,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a2baf3e0ce0d85a with lease ID 0xe092fb99dedcdea5: from storage DS-b625cfaa-adbb-4f96-aa2f-d0814d3442db node DatanodeRegistration(127.0.0.1:33893, datanodeUuid=bb808f8c-e084-41fe-a6ad-81f5317f75fc, infoPort=38183, infoSecurePort=0, ipcPort=45943, storageInfo=lv=-57;cid=testClusterID;nsid=1668388751;c=1733637103728), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:51:44,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58 2024-12-08T05:51:44,371 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/zookeeper_0, clientPort=64902, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:51:44,372 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64902 2024-12-08T05:51:44,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,374 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:44,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:51:44,383 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158 with version=8 2024-12-08T05:51:44,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:51:44,385 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:51:44,385 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:44,386 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39283 2024-12-08T05:51:44,387 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39283 connecting to ZooKeeper ensemble=127.0.0.1:64902 2024-12-08T05:51:44,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392830x0, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:44,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39283-0x10190a0cd820000 connected 2024-12-08T05:51:44,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,409 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:44,409 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158, hbase.cluster.distributed=false 2024-12-08T05:51:44,410 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:44,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39283 2024-12-08T05:51:44,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39283 2024-12-08T05:51:44,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39283 2024-12-08T05:51:44,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39283 2024-12-08T05:51:44,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39283 2024-12-08T05:51:44,426 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:51:44,426 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:51:44,427 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40823 2024-12-08T05:51:44,428 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40823 connecting to ZooKeeper ensemble=127.0.0.1:64902 2024-12-08T05:51:44,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,430 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408230x0, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:51:44,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:408230x0, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:51:44,434 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40823-0x10190a0cd820001 connected 2024-12-08T05:51:44,434 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:51:44,435 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:51:44,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:51:44,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:51:44,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40823 2024-12-08T05:51:44,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40823 2024-12-08T05:51:44,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40823 2024-12-08T05:51:44,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40823 2024-12-08T05:51:44,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40823 2024-12-08T05:51:44,453 
DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:39283 2024-12-08T05:51:44,453 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:44,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:44,455 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:51:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,458 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:51:44,459 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,39283,1733637104385 from backup master directory 2024-12-08T05:51:44,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:44,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:51:44,460 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
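The ZKUtil lines above repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master. That is standard ZooKeeper behaviour: exists() registers the watch whether or not the path is present, so the client is notified later when the node is created. A minimal, hypothetical sketch against the plain ZooKeeper client (not HBase's ZKUtil; the ensemble address is copied from the log and the sleep only keeps the demo session alive):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        // Session watcher: receives connection-state events (SyncConnected etc.),
        // matching the "type=None, state=SyncConnected" entries in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64902", 30_000,
            (WatchedEvent e) -> System.out.println("session event: " + e.getState()));

        // exists() returns null while /hbase/running is absent, but the watch is still
        // registered, so a later NodeCreated event for that path will be delivered.
        Watcher runningWatch =
            (WatchedEvent e) -> System.out.println("/hbase/running: " + e.getType());
        System.out.println("stat = " + zk.exists("/hbase/running", runningWatch));

        Thread.sleep(60_000);   // keep the session open long enough to observe the event
        zk.close();
    }
}
```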
2024-12-08T05:51:44,460 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,464 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/hbase.id] with ID: f53bd42f-75e2-43ed-ad29-fd8eeaaabf32 2024-12-08T05:51:44,464 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/.tmp/hbase.id 2024-12-08T05:51:44,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:44,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:51:44,474 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/.tmp/hbase.id]:[hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/hbase.id] 2024-12-08T05:51:44,485 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:44,485 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:51:44,486 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
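The FSUtils entries above show the cluster ID being written to a temporary location under .tmp and then moved to its final name, so readers never observe a half-written hbase.id. A hedged sketch of that write-then-rename pattern with the plain Hadoop FileSystem API (the paths are shortened stand-ins for the long test directories in the log; this is not the FSUtils code itself):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:42183");   // NameNode address from the log

        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // illustrative paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");

        // 1. Write the ID to the temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place; HDFS rename is atomic, so the final file appears all at once.
        if (!fs.rename(tmp, dst)) {
            throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
    }
}
```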
2024-12-08T05:51:44,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:44,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:51:44,494 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:44,495 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:51:44,495 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:44,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:44,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:51:44,502 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store 2024-12-08T05:51:44,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:44,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:51:44,509 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:51:44,509 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
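The MasterRegion entries above print the full descriptor for the local 'master:store' table, including per-family settings such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. For reference, a descriptor with the same kind of settings can be assembled with the public HBase client builders; the sketch below uses a made-up table name ('demo:store'), covers only two of the four families, and is not the MasterRegion code itself:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                     // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                     // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                               // BLOCKSIZE => 64 KB
                .build())
            .build();
    }
}
```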
2024-12-08T05:51:44,509 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637104509Disabling compacts and flushes for region at 1733637104509Disabling writes for close at 1733637104509Writing region close event to WAL at 1733637104509Closed at 1733637104509 2024-12-08T05:51:44,510 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/.initializing 2024-12-08T05:51:44,510 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/WALs/0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,512 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C39283%2C1733637104385, suffix=, logDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/WALs/0d942cb2025d,39283,1733637104385, archiveDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/oldWALs, maxLogs=10 2024-12-08T05:51:44,512 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C39283%2C1733637104385.1733637104512 2024-12-08T05:51:44,517 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/WALs/0d942cb2025d,39283,1733637104385/0d942cb2025d%2C39283%2C1733637104385.1733637104512 2024-12-08T05:51:44,520 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:36267:36267)] 2024-12-08T05:51:44,520 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:44,521 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:44,521 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,521 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:51:44,523 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:44,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:51:44,524 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:44,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:51:44,526 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:44,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:51:44,527 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:44,528 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,529 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,529 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,530 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,530 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,531 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:51:44,532 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:51:44,534 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:44,534 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744594, jitterRate=-0.05320031940937042}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:51:44,535 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637104521Initializing all the Stores at 1733637104521Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637104521Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637104522 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637104522Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637104522Cleaning up temporary data from old regions at 1733637104530 (+8 ms)Region opened successfully at 1733637104535 (+5 ms) 2024-12-08T05:51:44,535 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:51:44,538 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6ca436, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:44,539 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:51:44,539 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:51:44,539 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:51:44,539 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:51:44,539 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:51:44,540 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:51:44,540 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:51:44,541 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:51:44,542 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:51:44,543 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:51:44,544 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:51:44,544 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:51:44,547 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:51:44,547 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:51:44,548 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:51:44,549 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:51:44,550 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:51:44,551 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:51:44,552 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:51:44,553 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:51:44,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:44,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:51:44,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,556 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,39283,1733637104385, sessionid=0x10190a0cd820000, setting cluster-up flag (Was=false) 2024-12-08T05:51:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,564 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:51:44,565 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:44,573 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:51:44,573 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39283,1733637104385 2024-12-08T05:51:44,574 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:51:44,576 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:44,576 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:51:44,576 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:51:44,576 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,39283,1733637104385 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:51:44,577 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:44,577 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:44,578 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:51:44,578 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637134578 2024-12-08T05:51:44,578 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:51:44,579 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:44,579 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:51:44,579 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:51:44,580 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:51:44,580 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637104580,5,FailOnTimeoutGroup] 2024-12-08T05:51:44,580 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637104580,5,FailOnTimeoutGroup] 2024-12-08T05:51:44,580 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,580 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:51:44,580 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,580 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,580 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,581 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:51:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:51:44,587 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:51:44,587 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158 2024-12-08T05:51:44,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:44,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:51:44,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:44,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:44,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:44,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:44,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:44,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:44,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:44,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:44,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:44,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:44,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:44,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:44,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:44,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:44,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:44,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740 2024-12-08T05:51:44,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740 2024-12-08T05:51:44,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:44,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:44,603 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
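The CompactionConfiguration entries above for the hbase:meta stores correspond to HBase's standard compaction tuning settings. The Java sketch below is illustrative only: the hbase.hstore.compaction.* and hbase.hregion.majorcompaction* key names are assumed from current HBase defaults rather than read from this log, and they reproduce the values reported (128 MB minCompactSize, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, a 7-day major period with 0.5 jitter).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        // Assumed key names; verify against the HBase version under test.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);        // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                               // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                              // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                        // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);                // off-peak ratio
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);  // throttle point (2.5 GiB)
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);                 // major period: 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);                 // major jitter
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }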
2024-12-08T05:51:44,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:44,606 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:44,606 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816814, jitterRate=0.03863346576690674}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637104594Initializing all the Stores at 1733637104594Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637104594Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637104595 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637104595Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637104595Cleaning up temporary data from old regions at 1733637104602 (+7 ms)Region opened successfully at 1733637104607 (+5 ms) 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:51:44,607 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:51:44,607 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:51:44,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637104607Disabling compacts and flushes for region at 1733637104607Disabling writes for close at 1733637104607Writing region close 
event to WAL at 1733637104607Closed at 1733637104607 2024-12-08T05:51:44,609 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:44,609 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:51:44,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:51:44,610 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:44,611 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:51:44,639 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(746): ClusterId : f53bd42f-75e2-43ed-ad29-fd8eeaaabf32 2024-12-08T05:51:44,639 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:51:44,641 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:51:44,641 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:51:44,643 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:51:44,644 DEBUG [RS:0;0d942cb2025d:40823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59fadd61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:51:44,656 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:40823 2024-12-08T05:51:44,656 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:51:44,656 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:51:44,656 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(832): About to register with Master. 
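The AbstractRpcClient line above shows the region server's RPC client being built with connectTO=10000, readTO=20000 and writeTO=60000. A hedged sketch of setting those socket timeouts follows; the hbase.ipc.client.socket.timeout.* key names are an assumption based on current HBase code, not something this log states.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcClientTimeoutsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for the connectTO/readTO/writeTO values in the log above.
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10_000); // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20_000);    // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60_000);   // writeTO
        System.out.println("connectTO=" + conf.getInt("hbase.ipc.client.socket.timeout.connect", -1));
      }
    }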
2024-12-08T05:51:44,657 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,39283,1733637104385 with port=40823, startcode=1733637104426 2024-12-08T05:51:44,657 DEBUG [RS:0;0d942cb2025d:40823 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:51:44,659 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50313, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:51:44,659 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39283 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,660 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39283 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,661 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158 2024-12-08T05:51:44,661 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42183 2024-12-08T05:51:44,661 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:51:44,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:51:44,663 DEBUG [RS:0;0d942cb2025d:40823 {}] zookeeper.ZKUtil(111): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,663 WARN [RS:0;0d942cb2025d:40823 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:51:44,663 INFO [RS:0;0d942cb2025d:40823 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:44,664 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,664 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,40823,1733637104426] 2024-12-08T05:51:44,667 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:51:44,668 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:51:44,672 INFO [RS:0;0d942cb2025d:40823 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:51:44,672 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
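The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. The lower mark is simply the global limit times the default lower-limit fraction of 0.95, and the limit itself is a fraction (0.4 by default) of the JVM heap. A minimal sketch of that arithmetic, where the 2200 MB heap figure is an assumption rather than a value from the log:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long heapBytes = 2200L * 1024 * 1024;   // assumed test JVM heap; not stated in the log
        double globalFraction = 0.4;            // hbase.regionserver.global.memstore.size (default)
        double lowerFraction = 0.95;            // hbase.regionserver.global.memstore.size.lower.limit (default)
        long globalLimit = (long) (heapBytes * globalFraction);   // 880 MB
        long lowerMark = (long) (globalLimit * lowerFraction);    // 836 MB
        System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n",
            globalLimit >> 20, lowerMark >> 20);
      }
    }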
2024-12-08T05:51:44,672 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:51:44,673 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:51:44,673 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,673 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:44,674 DEBUG [RS:0;0d942cb2025d:40823 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
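The ScheduledChore entries above and below (CompactionChecker every second, CompactedHFilesCleaner every two minutes, and so on) are periodic tasks registered with the region server's ChoreService. The sketch below shows that general pattern using HBase's ScheduledChore and ChoreService classes; the chore name, period and Stoppable stub are illustrative, and these are internal APIs whose signatures can vary between HBase versions.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal stub standing in for the region server's stop state.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService choreService = new ChoreService("demo");
        ScheduledChore checker = new ScheduledChore("DemoCompactionChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("would check stores for compaction needs here");
          }
        };
        choreService.scheduleChore(checker);   // runs chore() every 1000 ms until stopped
        Thread.sleep(3000);
        stopper.stop("demo done");
        choreService.shutdown();
      }
    }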
2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,675 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40823,1733637104426-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:44,690 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:51:44,690 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40823,1733637104426-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,690 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,690 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.Replication(171): 0d942cb2025d,40823,1733637104426 started 2024-12-08T05:51:44,703 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:44,703 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,40823,1733637104426, RpcServer on 0d942cb2025d/172.17.0.2:40823, sessionid=0x10190a0cd820001 2024-12-08T05:51:44,703 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:51:44,703 DEBUG [RS:0;0d942cb2025d:40823 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,703 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,40823,1733637104426' 2024-12-08T05:51:44,703 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,40823,1733637104426 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,40823,1733637104426' 2024-12-08T05:51:44,704 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:51:44,704 DEBUG 
[RS:0;0d942cb2025d:40823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-08T05:51:44,705 DEBUG [RS:0;0d942cb2025d:40823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-08T05:51:44,705 INFO [RS:0;0d942cb2025d:40823 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-08T05:51:44,705 INFO [RS:0;0d942cb2025d:40823 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-08T05:51:44,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T05:51:44,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T05:51:44,761 WARN [0d942cb2025d:39283 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
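The two WARN entries above come from closing WAL writers of an earlier mini-cluster after its DFS client was already shut down: RecoverLeaseFSUtils reflectively calls DFSClient.isFileClosed(), which then fails with "Filesystem closed". Stripped of the reflection, the underlying HDFS calls look roughly like the sketch below; the WAL path and polling interval are illustrative and not taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) new Path("hdfs://localhost:46561/").getFileSystem(conf)) {
          Path wal = new Path("/user/jenkins/some-old-wal");   // illustrative WAL path
          boolean recovered = dfs.recoverLease(wal);           // ask the NameNode to start lease recovery
          while (!recovered && !dfs.isFileClosed(wal)) {       // isFileClosed() is the call that fails above
            Thread.sleep(1000);                                // poll until the NameNode closes the file
            recovered = dfs.recoverLease(wal);
          }
        }
      }
    }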
2024-12-08T05:51:44,806 INFO [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C40823%2C1733637104426, suffix=, logDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426, archiveDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs, maxLogs=32 2024-12-08T05:51:44,807 INFO [RS:0;0d942cb2025d:40823 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40823%2C1733637104426.1733637104807 2024-12-08T05:51:44,812 INFO [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637104807 2024-12-08T05:51:44,813 DEBUG [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36267:36267),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-08T05:51:45,011 DEBUG [0d942cb2025d:39283 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:51:45,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:45,013 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,40823,1733637104426, state=OPENING 2024-12-08T05:51:45,015 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:51:45,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:45,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:51:45,017 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:51:45,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:45,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,40823,1733637104426}] 2024-12-08T05:51:45,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:45,170 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:51:45,172 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58523, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:51:45,175 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:51:45,175 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:51:45,177 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C40823%2C1733637104426.meta, suffix=.meta, logDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426, archiveDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs, maxLogs=32 2024-12-08T05:51:45,177 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40823%2C1733637104426.meta.1733637105177.meta 2024-12-08T05:51:45,185 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.meta.1733637105177.meta 2024-12-08T05:51:45,192 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:36267:36267)] 2024-12-08T05:51:45,193 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:45,193 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:51:45,193 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:51:45,194 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
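The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above follow from a handful of region server settings, with the roll size being the WAL block size times the log-roll multiplier (0.5 by default). An illustrative sketch, with key names assumed from current HBase defaults rather than read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                    // FSHLogProvider, as in the log
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20);   // blocksize: 256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);    // rollsize = blocksize * 0.5 = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                   // maxLogs
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("rollsize=" + (rollSize >> 20) + " MB");
      }
    }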
2024-12-08T05:51:45,194 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:51:45,194 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:45,194 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:51:45,194 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:51:45,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:51:45,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:51:45,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:45,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:51:45,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:51:45,197 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:45,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:51:45,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:51:45,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:51:45,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:51:45,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:51:45,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T05:51:45,200 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:51:45,200 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740 2024-12-08T05:51:45,201 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740 2024-12-08T05:51:45,202 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:51:45,202 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:51:45,203 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:51:45,204 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:51:45,204 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694740, jitterRate=-0.11659307777881622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:51:45,205 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:51:45,205 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637105194Writing region info on filesystem at 1733637105194Initializing all the Stores at 1733637105195 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637105195Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637105195Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637105195Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637105195Cleaning up temporary data from old regions at 1733637105202 (+7 ms)Running coprocessor post-open hooks at 1733637105205 (+3 ms)Region opened successfully at 1733637105205 2024-12-08T05:51:45,206 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637105170 2024-12-08T05:51:45,208 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:51:45,208 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:51:45,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:45,210 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,40823,1733637104426, state=OPEN 2024-12-08T05:51:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:45,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:51:45,217 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:45,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:45,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:51:45,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:51:45,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,40823,1733637104426 in 200 msec 2024-12-08T05:51:45,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:51:45,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-12-08T05:51:45,223 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:51:45,223 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:51:45,224 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:45,224 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,40823,1733637104426, seqNum=-1] 2024-12-08T05:51:45,224 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:45,226 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32769, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:45,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 655 msec 2024-12-08T05:51:45,231 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637105231, completionTime=-1 2024-12-08T05:51:45,231 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:51:45,231 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637165233 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637225233 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:45,233 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:39283, period=300000, unit=MILLISECONDS is enabled. 
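InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces during master startup. A client creating its own namespace goes through the same Admin API, roughly as in the sketch below; the connection setup and the 'demo_ns' name are illustrative assumptions, not part of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();    // assumes hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build()); // illustrative name
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());                // would list default, hbase, demo_ns
          }
        }
      }
    }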
2024-12-08T05:51:45,234 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:45,234 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:51:45,235 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.777sec 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:51:45,237 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:51:45,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5acc7ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:45,239 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,39283,-1 for getting cluster id 2024-12-08T05:51:45,239 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:51:45,239 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:51:45,239 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:51:45,239 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39283,1733637104385-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:51:45,242 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f53bd42f-75e2-43ed-ad29-fd8eeaaabf32' 2024-12-08T05:51:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:51:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f53bd42f-75e2-43ed-ad29-fd8eeaaabf32" 2024-12-08T05:51:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42eee027, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,39283,-1] 2024-12-08T05:51:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:51:45,244 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:51:45,245 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45928, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:51:45,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2128b0cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:51:45,246 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:51:45,247 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,40823,1733637104426, seqNum=-1] 2024-12-08T05:51:45,247 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:51:45,248 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:51:45,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,39283,1733637104385 2024-12-08T05:51:45,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:51:45,252 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:51:45,252 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:51:45,253 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,39283,1733637104385 2024-12-08T05:51:45,253 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@49eb01a 2024-12-08T05:51:45,253 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:51:45,254 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:51:45,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T05:51:45,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-08T05:51:45,255 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:51:45,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:51:45,257 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:51:45,257 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-08T05:51:45,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:51:45,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:51:45,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741835_1011 (size=405) 2024-12-08T05:51:45,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741835_1011 (size=405) 2024-12-08T05:51:45,268 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
afb037aa409815ebd49c6b835900b6bf, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158 2024-12-08T05:51:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741836_1012 (size=88) 2024-12-08T05:51:45,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741836_1012 (size=88) 2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing afb037aa409815ebd49c6b835900b6bf, disabling compactions & flushes 2024-12-08T05:51:45,274 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. after waiting 0 ms 2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,274 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 
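The entries above show the master accepting a create-table request for 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (VERSIONS => '1') and deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) settings, which trigger the TableDescriptorChecker warnings. A minimal sketch of how such a table could be created from client code with the HBase Admin API follows; the class name, connection setup, and use of these exact size values are assumptions inferred from the log, not the test's actual source.

// Sketch only: builds a descriptor equivalent to the one logged above and creates the table.
// Connection setup and size values are assumptions taken from the log, not the real test code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
          // Small limits mirror the warnings above: they force frequent flushes and splits in the test.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build());
      admin.createTable(table.build());
    }
  }
}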
2024-12-08T05:51:45,274 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for afb037aa409815ebd49c6b835900b6bf: Waiting for close lock at 1733637105274Disabling compacts and flushes for region at 1733637105274Disabling writes for close at 1733637105274Writing region close event to WAL at 1733637105274Closed at 1733637105274 2024-12-08T05:51:45,276 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:51:45,276 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733637105276"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637105276"}]},"ts":"1733637105276"} 2024-12-08T05:51:45,278 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-08T05:51:45,279 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:51:45,280 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637105280"}]},"ts":"1733637105280"} 2024-12-08T05:51:45,282 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-08T05:51:45,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=afb037aa409815ebd49c6b835900b6bf, ASSIGN}] 2024-12-08T05:51:45,283 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=afb037aa409815ebd49c6b835900b6bf, ASSIGN 2024-12-08T05:51:45,284 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=afb037aa409815ebd49c6b835900b6bf, ASSIGN; state=OFFLINE, location=0d942cb2025d,40823,1733637104426; forceNewPlan=false, retain=false 2024-12-08T05:51:45,435 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=afb037aa409815ebd49c6b835900b6bf, regionState=OPENING, regionLocation=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:45,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=afb037aa409815ebd49c6b835900b6bf, ASSIGN 
because future has completed 2024-12-08T05:51:45,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure afb037aa409815ebd49c6b835900b6bf, server=0d942cb2025d,40823,1733637104426}] 2024-12-08T05:51:45,595 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,595 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => afb037aa409815ebd49c6b835900b6bf, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:51:45,596 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,596 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:51:45,596 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,596 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,597 INFO [StoreOpener-afb037aa409815ebd49c6b835900b6bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,598 INFO [StoreOpener-afb037aa409815ebd49c6b835900b6bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region afb037aa409815ebd49c6b835900b6bf columnFamilyName info 2024-12-08T05:51:45,598 DEBUG [StoreOpener-afb037aa409815ebd49c6b835900b6bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:51:45,599 INFO [StoreOpener-afb037aa409815ebd49c6b835900b6bf-1 {}] regionserver.HStore(327): Store=afb037aa409815ebd49c6b835900b6bf/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:51:45,599 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,600 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,600 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,600 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,601 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,602 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,605 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:51:45,605 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened afb037aa409815ebd49c6b835900b6bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815043, jitterRate=0.036381810903549194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:51:45,605 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:51:45,606 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for afb037aa409815ebd49c6b835900b6bf: Running coprocessor pre-open hook at 1733637105596Writing region info on filesystem at 1733637105596Initializing all the Stores at 1733637105597 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637105597Cleaning up temporary data from old regions at 1733637105601 (+4 ms)Running coprocessor post-open hooks at 1733637105605 (+4 ms)Region opened successfully at 1733637105606 (+1 ms) 
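At this point region afb037aa409815ebd49c6b835900b6bf is open on 0d942cb2025d,40823 with next sequenceid=2, and the per-table request metrics registered further down imply client traffic against it. The following is a minimal client-side sketch of that kind of traffic, a single put into the 'info' family followed by an explicit flush; the 'conn' variable, row key, and values are illustrative assumptions, not the test's actual code.

// Sketch only: one put plus an explicit flush against the newly opened table.
// 'conn' is assumed to be an open HBase Connection; row key and value are illustrative.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteAndFlushSketch {
  static void writeAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("row0"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value0"));
      table.put(put);
      // With an 8 KB flush size, flushing quickly produces HFiles, which is what a
      // compaction/log-rolling test needs in order to exercise WAL rolling.
      admin.flush(name);
    }
  }
}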
2024-12-08T05:51:45,607 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf., pid=6, masterSystemTime=1733637105591 2024-12-08T05:51:45,611 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,611 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:51:45,612 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=afb037aa409815ebd49c6b835900b6bf, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,40823,1733637104426 2024-12-08T05:51:45,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure afb037aa409815ebd49c6b835900b6bf, server=0d942cb2025d,40823,1733637104426 because future has completed 2024-12-08T05:51:45,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:51:45,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure afb037aa409815ebd49c6b835900b6bf, server=0d942cb2025d,40823,1733637104426 in 178 msec 2024-12-08T05:51:45,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:51:45,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=afb037aa409815ebd49c6b835900b6bf, ASSIGN in 337 msec 2024-12-08T05:51:45,623 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:51:45,623 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637105623"}]},"ts":"1733637105623"} 2024-12-08T05:51:45,625 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-08T05:51:45,626 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:51:45,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 371 msec 2024-12-08T05:51:45,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:51:45,648 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T05:51:45,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:51:45,649 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T05:51:45,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:51:45,649 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T05:51:45,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:45,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:46,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:46,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:47,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:47,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:48,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:48,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:49,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:49,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:50,696 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:51:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:51:50,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:50,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:50,727 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:51:50,728 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-08T05:51:51,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:51,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:52,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:52,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:53,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:53,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:54,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:54,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-08T05:51:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-08T05:51:55,337 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-08T05:51:55,337 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-08T05:51:55,340 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T05:51:55,340 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.
2024-12-08T05:51:55,343 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf., hostname=0d942cb2025d,40823,1733637104426, seqNum=2]
2024-12-08T05:51:55,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T05:51:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-08T05:51:55,356 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-08T05:51:55,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-08T05:51:55,357 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T05:51:55,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T05:51:55,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40823 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-08T05:51:55,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.
2024-12-08T05:51:55,519 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing afb037aa409815ebd49c6b835900b6bf 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-08T05:51:55,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dd6f594897f6468db69d9219a8a5f4dc is 1080, key is row0001/info:/1733637115344/Put/seqid=0
2024-12-08T05:51:55,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741837_1013 (size=6033)
2024-12-08T05:51:55,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741837_1013 (size=6033)
2024-12-08T05:51:55,547 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dd6f594897f6468db69d9219a8a5f4dc
2024-12-08T05:51:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dd6f594897f6468db69d9219a8a5f4dc as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc
2024-12-08T05:51:55,559 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc, entries=1, sequenceid=5, filesize=5.9 K
2024-12-08T05:51:55,560 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for afb037aa409815ebd49c6b835900b6bf in 41ms, sequenceid=5, compaction requested=false
2024-12-08T05:51:55,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for afb037aa409815ebd49c6b835900b6bf:
2024-12-08T05:51:55,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.
2024-12-08T05:51:55,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-08T05:51:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-08T05:51:55,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T05:51:55,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-08T05:51:55,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec 2024-12-08T05:51:55,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:55,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:56,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:56,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:57,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:57,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:58,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:51:58,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:59,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:51:59,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:00,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:00,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:01,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:01,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:02,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:02,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:03,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:03,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:04,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:04,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:05,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-08T05:52:05,458 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T05:52:05,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:05,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:05,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-08T05:52:05,463 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T05:52:05,464 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T05:52:05,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T05:52:05,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40823 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-08T05:52:05,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 
2024-12-08T05:52:05,617 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing afb037aa409815ebd49c6b835900b6bf 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T05:52:05,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dbe3a10a958c4c078dfc79df261202e2 is 1080, key is row0002/info:/1733637125459/Put/seqid=0 2024-12-08T05:52:05,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741838_1014 (size=6033) 2024-12-08T05:52:05,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741838_1014 (size=6033) 2024-12-08T05:52:05,630 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dbe3a10a958c4c078dfc79df261202e2 2024-12-08T05:52:05,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/dbe3a10a958c4c078dfc79df261202e2 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2 2024-12-08T05:52:05,642 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2, entries=1, sequenceid=9, filesize=5.9 K 2024-12-08T05:52:05,643 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for afb037aa409815ebd49c6b835900b6bf in 26ms, sequenceid=9, compaction requested=false 2024-12-08T05:52:05,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for afb037aa409815ebd49c6b835900b6bf: 2024-12-08T05:52:05,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 
2024-12-08T05:52:05,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-08T05:52:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-08T05:52:05,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-08T05:52:05,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-08T05:52:05,649 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-12-08T05:52:05,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:05,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:06,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:06,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:07,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:07,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:08,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:08,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:09,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:09,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:10,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:10,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:10,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta after 68036ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T05:52:10,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T05:52:11,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:11,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:12,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:12,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:13,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:13,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:14,369 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T05:52:14,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:14,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-08T05:52:15,527 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T05:52:15,529 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40823%2C1733637104426.1733637135529 2024-12-08T05:52:15,537 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:15,537 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:15,537 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:15,537 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:15,537 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:15,537 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637104807 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637135529 2024-12-08T05:52:15,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36267:36267),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-08T05:52:15,540 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637104807 is not closed yet, will try archiving it next time 2024-12-08T05:52:15,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:15,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741833_1009 (size=5546) 2024-12-08T05:52:15,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741833_1009 (size=5546) 2024-12-08T05:52:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-08T05:52:15,544 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T05:52:15,545 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T05:52:15,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T05:52:15,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40823 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-08T05:52:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:15,698 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing afb037aa409815ebd49c6b835900b6bf 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T05:52:15,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/66c49b99baeb4f27ab9b877175ebd172 is 1080, key is row0003/info:/1733637135528/Put/seqid=0 2024-12-08T05:52:15,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741840_1016 (size=6033) 2024-12-08T05:52:15,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741840_1016 (size=6033) 2024-12-08T05:52:15,708 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/66c49b99baeb4f27ab9b877175ebd172 2024-12-08T05:52:15,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/66c49b99baeb4f27ab9b877175ebd172 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172 2024-12-08T05:52:15,719 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172, entries=1, sequenceid=13, filesize=5.9 K 2024-12-08T05:52:15,720 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for afb037aa409815ebd49c6b835900b6bf in 22ms, sequenceid=13, compaction requested=true 2024-12-08T05:52:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for afb037aa409815ebd49c6b835900b6bf: 2024-12-08T05:52:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-08T05:52:15,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-08T05:52:15,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-08T05:52:15,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-08T05:52:15,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-12-08T05:52:15,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:15,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:16,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:16,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:17,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:17,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:18,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:18,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:19,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:19,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:20,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:20,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:21,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:21,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:22,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:22,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:23,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:23,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:24,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:24,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:25,241 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T05:52:25,241 INFO [master/0d942cb2025d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-08T05:52:25,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-08T05:52:25,577 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T05:52:25,577 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:52:25,579 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:52:25,579 DEBUG [Time-limited test {}] regionserver.HStore(1541): afb037aa409815ebd49c6b835900b6bf/info is initiating minor compaction (all files) 2024-12-08T05:52:25,579 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:52:25,579 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:25,579 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of afb037aa409815ebd49c6b835900b6bf/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:25,579 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172] into tmpdir=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp, totalSize=17.7 K 2024-12-08T05:52:25,580 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dd6f594897f6468db69d9219a8a5f4dc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733637115344 2024-12-08T05:52:25,580 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dbe3a10a958c4c078dfc79df261202e2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733637125459 2024-12-08T05:52:25,580 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 66c49b99baeb4f27ab9b877175ebd172, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733637135528 2024-12-08T05:52:25,591 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): afb037aa409815ebd49c6b835900b6bf#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:52:25,592 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/712c976ed792412c84b5fb6b58695686 is 1080, key is row0001/info:/1733637115344/Put/seqid=0 2024-12-08T05:52:25,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741841_1017 (size=8296) 2024-12-08T05:52:25,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741841_1017 (size=8296) 2024-12-08T05:52:25,602 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/712c976ed792412c84b5fb6b58695686 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/712c976ed792412c84b5fb6b58695686 2024-12-08T05:52:25,608 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in afb037aa409815ebd49c6b835900b6bf/info of afb037aa409815ebd49c6b835900b6bf into 712c976ed792412c84b5fb6b58695686(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:52:25,608 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for afb037aa409815ebd49c6b835900b6bf: 2024-12-08T05:52:25,611 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40823%2C1733637104426.1733637145611 2024-12-08T05:52:25,619 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:25,619 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:25,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:25,620 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:25,620 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:25,620 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637135529 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637145611 2024-12-08T05:52:25,621 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36267:36267),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-08T05:52:25,621 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637135529 is not closed yet, will try archiving it next time 2024-12-08T05:52:25,621 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637104807 to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs/0d942cb2025d%2C40823%2C1733637104426.1733637104807 2024-12-08T05:52:25,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741839_1015 (size=2520) 2024-12-08T05:52:25,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:25,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741839_1015 (size=2520) 2024-12-08T05:52:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-08T05:52:25,625 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-08T05:52:25,626 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T05:52:25,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T05:52:25,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:25,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:25,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40823 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-08T05:52:25,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:25,778 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing afb037aa409815ebd49c6b835900b6bf 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T05:52:25,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/2a3be346e2834ae3b158cbd16f1a1aa5 is 1080, key is row0000/info:/1733637145609/Put/seqid=0 2024-12-08T05:52:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741843_1019 (size=6033) 2024-12-08T05:52:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741843_1019 (size=6033) 2024-12-08T05:52:25,788 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/2a3be346e2834ae3b158cbd16f1a1aa5 2024-12-08T05:52:25,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/2a3be346e2834ae3b158cbd16f1a1aa5 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/2a3be346e2834ae3b158cbd16f1a1aa5 2024-12-08T05:52:25,798 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/2a3be346e2834ae3b158cbd16f1a1aa5, entries=1, sequenceid=18, filesize=5.9 K 2024-12-08T05:52:25,799 INFO [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for afb037aa409815ebd49c6b835900b6bf in 21ms, sequenceid=18, compaction requested=false 2024-12-08T05:52:25,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for afb037aa409815ebd49c6b835900b6bf: 2024-12-08T05:52:25,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:25,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-08T05:52:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-08T05:52:25,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-08T05:52:25,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 175 msec 2024-12-08T05:52:25,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-08T05:52:26,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:26,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:27,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:27,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:28,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:28,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:29,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:29,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:30,596 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region afb037aa409815ebd49c6b835900b6bf, had cached 0 bytes from a total of 14329 2024-12-08T05:52:30,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:30,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:31,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:31,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:32,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:32,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:33,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:33,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:34,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:34,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:35,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39283 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-08T05:52:35,697 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-08T05:52:35,700 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40823%2C1733637104426.1733637155700 2024-12-08T05:52:35,706 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,706 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,706 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,706 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,706 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,707 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637145611 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637155700 2024-12-08T05:52:35,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:36267:36267)] 2024-12-08T05:52:35,707 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637145611 is not closed 
yet, will try archiving it next time 2024-12-08T05:52:35,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:52:35,708 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/WALs/0d942cb2025d,40823,1733637104426/0d942cb2025d%2C40823%2C1733637104426.1733637135529 to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs/0d942cb2025d%2C40823%2C1733637104426.1733637135529 2024-12-08T05:52:35,708 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:52:35,708 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:52:35,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:35,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:35,708 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:52:35,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741842_1018 (size=2026) 2024-12-08T05:52:35,708 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1055589973, stopped=false 2024-12-08T05:52:35,708 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,39283,1733637104385 2024-12-08T05:52:35,709 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:52:35,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741842_1018 (size=2026) 2024-12-08T05:52:35,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:52:35,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:52:35,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:35,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:35,710 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:52:35,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:52:35,711 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:52:35,711 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:52:35,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:35,711 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:52:35,712 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,40823,1733637104426' ***** 2024-12-08T05:52:35,712 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:52:35,712 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:52:35,712 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:52:35,712 INFO [RS:0;0d942cb2025d:40823 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:52:35,712 INFO [RS:0;0d942cb2025d:40823 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:52:35,712 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(3091): Received CLOSE for afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,40823,1733637104426 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:40823. 2024-12-08T05:52:35,713 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing afb037aa409815ebd49c6b835900b6bf, disabling compactions & flushes 2024-12-08T05:52:35,713 DEBUG [RS:0;0d942cb2025d:40823 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:52:35,713 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 
2024-12-08T05:52:35,713 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:35,713 DEBUG [RS:0;0d942cb2025d:40823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:35,713 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. after waiting 0 ms 2024-12-08T05:52:35,713 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:52:35,713 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing afb037aa409815ebd49c6b835900b6bf 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:52:35,713 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:52:35,716 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-08T05:52:35,716 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, afb037aa409815ebd49c6b835900b6bf=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.} 2024-12-08T05:52:35,716 DEBUG [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, afb037aa409815ebd49c6b835900b6bf 2024-12-08T05:52:35,716 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:52:35,716 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:52:35,716 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:52:35,716 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:52:35,716 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:52:35,717 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-08T05:52:35,723 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/3aa9487509264f98b35801bf09264c27 is 1080, key is row0001/info:/1733637155699/Put/seqid=0 2024-12-08T05:52:35,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741845_1021 (size=6033) 2024-12-08T05:52:35,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741845_1021 (size=6033) 2024-12-08T05:52:35,729 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/3aa9487509264f98b35801bf09264c27 2024-12-08T05:52:35,736 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/.tmp/info/3aa9487509264f98b35801bf09264c27 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/3aa9487509264f98b35801bf09264c27 2024-12-08T05:52:35,736 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/info/8393bf4268e34867a5ababfb3a35b7e3 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf./info:regioninfo/1733637105612/Put/seqid=0 2024-12-08T05:52:35,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741846_1022 (size=7308) 2024-12-08T05:52:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741846_1022 (size=7308) 2024-12-08T05:52:35,741 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/info/8393bf4268e34867a5ababfb3a35b7e3 2024-12-08T05:52:35,741 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/3aa9487509264f98b35801bf09264c27, entries=1, sequenceid=22, filesize=5.9 K 2024-12-08T05:52:35,742 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
afb037aa409815ebd49c6b835900b6bf in 29ms, sequenceid=22, compaction requested=true 2024-12-08T05:52:35,743 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172] to archive 2024-12-08T05:52:35,744 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T05:52:35,745 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dd6f594897f6468db69d9219a8a5f4dc 2024-12-08T05:52:35,747 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2 to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/dbe3a10a958c4c078dfc79df261202e2 2024-12-08T05:52:35,748 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172 to hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/info/66c49b99baeb4f27ab9b877175ebd172 2024-12-08T05:52:35,748 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0d942cb2025d:39283 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-08T05:52:35,749 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [dd6f594897f6468db69d9219a8a5f4dc=6033, dbe3a10a958c4c078dfc79df261202e2=6033, 66c49b99baeb4f27ab9b877175ebd172=6033] 2024-12-08T05:52:35,752 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/afb037aa409815ebd49c6b835900b6bf/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-08T05:52:35,753 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 2024-12-08T05:52:35,753 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for afb037aa409815ebd49c6b835900b6bf: Waiting for close lock at 1733637155713Running coprocessor pre-close hooks at 1733637155713Disabling compacts and flushes for region at 1733637155713Disabling writes for close at 1733637155713Obtaining lock to block concurrent updates at 1733637155713Preparing flush snapshotting stores in afb037aa409815ebd49c6b835900b6bf at 1733637155713Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733637155713Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. at 1733637155714 (+1 ms)Flushing afb037aa409815ebd49c6b835900b6bf/info: creating writer at 1733637155714Flushing afb037aa409815ebd49c6b835900b6bf/info: appending metadata at 1733637155723 (+9 ms)Flushing afb037aa409815ebd49c6b835900b6bf/info: closing flushed file at 1733637155723Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f2bea2e: reopening flushed file at 1733637155735 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for afb037aa409815ebd49c6b835900b6bf in 29ms, sequenceid=22, compaction requested=true at 1733637155742 (+7 ms)Writing region close event to WAL at 1733637155749 (+7 ms)Running coprocessor post-close hooks at 1733637155753 (+4 ms)Closed at 1733637155753 2024-12-08T05:52:35,753 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733637105254.afb037aa409815ebd49c6b835900b6bf. 
2024-12-08T05:52:35,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:35,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:35,768 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/ns/652027ce7b5940c183c86a44b04cfba3 is 43, key is default/ns:d/1733637105226/Put/seqid=0 2024-12-08T05:52:35,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741847_1023 (size=5153) 2024-12-08T05:52:35,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741847_1023 (size=5153) 2024-12-08T05:52:35,773 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/ns/652027ce7b5940c183c86a44b04cfba3 2024-12-08T05:52:35,791 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/table/f5ed78635d1e483e9cc26207a394ee0c is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733637105623/Put/seqid=0 2024-12-08T05:52:35,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741848_1024 (size=5508) 2024-12-08T05:52:35,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741848_1024 (size=5508) 2024-12-08T05:52:35,796 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/table/f5ed78635d1e483e9cc26207a394ee0c 2024-12-08T05:52:35,801 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/info/8393bf4268e34867a5ababfb3a35b7e3 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/info/8393bf4268e34867a5ababfb3a35b7e3 2024-12-08T05:52:35,805 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/info/8393bf4268e34867a5ababfb3a35b7e3, entries=10, sequenceid=11, filesize=7.1 K 2024-12-08T05:52:35,806 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/ns/652027ce7b5940c183c86a44b04cfba3 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/ns/652027ce7b5940c183c86a44b04cfba3 2024-12-08T05:52:35,810 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/ns/652027ce7b5940c183c86a44b04cfba3, entries=2, sequenceid=11, filesize=5.0 K 2024-12-08T05:52:35,811 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/.tmp/table/f5ed78635d1e483e9cc26207a394ee0c as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/table/f5ed78635d1e483e9cc26207a394ee0c 2024-12-08T05:52:35,815 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/table/f5ed78635d1e483e9cc26207a394ee0c, entries=2, sequenceid=11, filesize=5.4 K 2024-12-08T05:52:35,816 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-12-08T05:52:35,821 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-08T05:52:35,821 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:52:35,821 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:52:35,821 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637155716Running coprocessor pre-close hooks at 1733637155716Disabling compacts and flushes for region at 1733637155716Disabling writes for 
close at 1733637155716Obtaining lock to block concurrent updates at 1733637155717 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733637155717Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733637155717Flushing stores of hbase:meta,,1.1588230740 at 1733637155717Flushing 1588230740/info: creating writer at 1733637155718 (+1 ms)Flushing 1588230740/info: appending metadata at 1733637155735 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733637155735Flushing 1588230740/ns: creating writer at 1733637155747 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733637155767 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733637155767Flushing 1588230740/table: creating writer at 1733637155778 (+11 ms)Flushing 1588230740/table: appending metadata at 1733637155791 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733637155791Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@235c6cac: reopening flushed file at 1733637155800 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1156ff18: reopening flushed file at 1733637155805 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f2cbddf: reopening flushed file at 1733637155810 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1733637155816 (+6 ms)Writing region close event to WAL at 1733637155818 (+2 ms)Running coprocessor post-close hooks at 1733637155821 (+3 ms)Closed at 1733637155821 2024-12-08T05:52:35,821 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:52:35,916 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,40823,1733637104426; all regions closed. 
2024-12-08T05:52:35,917 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,917 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,917 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,917 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,917 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741834_1010 (size=3306) 2024-12-08T05:52:35,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741834_1010 (size=3306) 2024-12-08T05:52:35,922 DEBUG [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs 2024-12-08T05:52:35,922 INFO [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C40823%2C1733637104426.meta:.meta(num 1733637105177) 2024-12-08T05:52:35,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,922 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,922 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,922 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:35,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741844_1020 (size=1252) 2024-12-08T05:52:35,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741844_1020 (size=1252) 2024-12-08T05:52:35,927 DEBUG [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/oldWALs 2024-12-08T05:52:35,927 INFO [RS:0;0d942cb2025d:40823 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C40823%2C1733637104426:(num 1733637155700) 2024-12-08T05:52:35,927 DEBUG [RS:0;0d942cb2025d:40823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:35,927 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:52:35,927 INFO [RS:0;0d942cb2025d:40823 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:52:35,927 INFO [RS:0;0d942cb2025d:40823 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T05:52:35,927 INFO [RS:0;0d942cb2025d:40823 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:52:35,927 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:52:35,928 INFO [RS:0;0d942cb2025d:40823 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40823 2024-12-08T05:52:35,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,40823,1733637104426 2024-12-08T05:52:35,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:52:35,930 INFO [RS:0;0d942cb2025d:40823 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:52:35,931 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,40823,1733637104426] 2024-12-08T05:52:35,933 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,40823,1733637104426 already deleted, retry=false 2024-12-08T05:52:35,933 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,40823,1733637104426 expired; onlineServers=0 2024-12-08T05:52:35,933 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,39283,1733637104385' ***** 2024-12-08T05:52:35,933 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:52:35,933 INFO [M:0;0d942cb2025d:39283 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:52:35,933 INFO [M:0;0d942cb2025d:39283 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:52:35,933 DEBUG [M:0;0d942cb2025d:39283 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:52:35,933 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:52:35,933 DEBUG [M:0;0d942cb2025d:39283 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:52:35,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637104580 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637104580,5,FailOnTimeoutGroup] 2024-12-08T05:52:35,933 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637104580 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637104580,5,FailOnTimeoutGroup] 2024-12-08T05:52:35,933 INFO [M:0;0d942cb2025d:39283 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:52:35,934 INFO [M:0;0d942cb2025d:39283 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:52:35,934 DEBUG [M:0;0d942cb2025d:39283 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:52:35,934 INFO [M:0;0d942cb2025d:39283 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:52:35,934 INFO [M:0;0d942cb2025d:39283 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:52:35,934 INFO [M:0;0d942cb2025d:39283 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:52:35,934 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:52:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:52:35,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:35,935 DEBUG [M:0;0d942cb2025d:39283 {}] zookeeper.ZKUtil(347): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:52:35,935 WARN [M:0;0d942cb2025d:39283 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:52:35,936 INFO [M:0;0d942cb2025d:39283 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/.lastflushedseqids 2024-12-08T05:52:35,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741849_1025 (size=130) 2024-12-08T05:52:35,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741849_1025 (size=130) 2024-12-08T05:52:35,941 INFO [M:0;0d942cb2025d:39283 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:52:35,942 INFO [M:0;0d942cb2025d:39283 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:52:35,942 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:52:35,942 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:35,942 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:35,942 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:52:35,942 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:35,942 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.91 KB 2024-12-08T05:52:35,958 DEBUG [M:0;0d942cb2025d:39283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2670c5369b89483e8bab21f026400321 is 82, key is hbase:meta,,1/info:regioninfo/1733637105209/Put/seqid=0 2024-12-08T05:52:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741850_1026 (size=5672) 2024-12-08T05:52:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741850_1026 (size=5672) 2024-12-08T05:52:35,963 INFO [M:0;0d942cb2025d:39283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2670c5369b89483e8bab21f026400321 2024-12-08T05:52:35,981 DEBUG [M:0;0d942cb2025d:39283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5cc0da5bd004e03874a63cee4c03682 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637105627/Put/seqid=0 2024-12-08T05:52:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741851_1027 (size=7818) 2024-12-08T05:52:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741851_1027 (size=7818) 2024-12-08T05:52:35,986 INFO [M:0;0d942cb2025d:39283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5cc0da5bd004e03874a63cee4c03682 2024-12-08T05:52:35,990 INFO [M:0;0d942cb2025d:39283 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e5cc0da5bd004e03874a63cee4c03682 2024-12-08T05:52:36,011 DEBUG [M:0;0d942cb2025d:39283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b65f2d23fc6e4897a79d3089009d37da is 69, key is 0d942cb2025d,40823,1733637104426/rs:state/1733637104660/Put/seqid=0 2024-12-08T05:52:36,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741852_1028 (size=5156) 2024-12-08T05:52:36,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741852_1028 (size=5156) 2024-12-08T05:52:36,016 INFO [M:0;0d942cb2025d:39283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b65f2d23fc6e4897a79d3089009d37da 2024-12-08T05:52:36,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:52:36,031 INFO [RS:0;0d942cb2025d:40823 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:52:36,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40823-0x10190a0cd820001, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:52:36,031 INFO [RS:0;0d942cb2025d:40823 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,40823,1733637104426; zookeeper connection closed. 2024-12-08T05:52:36,032 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7abba695 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7abba695 2024-12-08T05:52:36,032 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:52:36,035 DEBUG [M:0;0d942cb2025d:39283 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84b23facba70447ead0f96a2696c83cb is 52, key is load_balancer_on/state:d/1733637105251/Put/seqid=0 2024-12-08T05:52:36,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741853_1029 (size=5056) 2024-12-08T05:52:36,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741853_1029 (size=5056) 2024-12-08T05:52:36,040 INFO [M:0;0d942cb2025d:39283 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84b23facba70447ead0f96a2696c83cb 2024-12-08T05:52:36,045 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2670c5369b89483e8bab21f026400321 as 
hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2670c5369b89483e8bab21f026400321 2024-12-08T05:52:36,050 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2670c5369b89483e8bab21f026400321, entries=8, sequenceid=121, filesize=5.5 K 2024-12-08T05:52:36,051 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e5cc0da5bd004e03874a63cee4c03682 as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5cc0da5bd004e03874a63cee4c03682 2024-12-08T05:52:36,055 INFO [M:0;0d942cb2025d:39283 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e5cc0da5bd004e03874a63cee4c03682 2024-12-08T05:52:36,055 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e5cc0da5bd004e03874a63cee4c03682, entries=14, sequenceid=121, filesize=7.6 K 2024-12-08T05:52:36,056 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b65f2d23fc6e4897a79d3089009d37da as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b65f2d23fc6e4897a79d3089009d37da 2024-12-08T05:52:36,061 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b65f2d23fc6e4897a79d3089009d37da, entries=1, sequenceid=121, filesize=5.0 K 2024-12-08T05:52:36,062 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/84b23facba70447ead0f96a2696c83cb as hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/84b23facba70447ead0f96a2696c83cb 2024-12-08T05:52:36,066 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42183/user/jenkins/test-data/d227cdad-79ac-8a74-0819-74ae44a50158/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/84b23facba70447ead0f96a2696c83cb, entries=1, sequenceid=121, filesize=4.9 K 2024-12-08T05:52:36,067 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false 2024-12-08T05:52:36,068 INFO [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:52:36,068 DEBUG [M:0;0d942cb2025d:39283 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637155942Disabling compacts and flushes for region at 1733637155942Disabling writes for close at 1733637155942Obtaining lock to block concurrent updates at 1733637155942Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637155942Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44593, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1733637155942Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733637155943 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637155943Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637155957 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637155957Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637155967 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637155981 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637155981Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637155991 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637156010 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637156010Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637156021 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637156035 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637156035Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@359c33a1: reopening flushed file at 1733637156045 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2828d41e: reopening flushed file at 1733637156050 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67d4a953: reopening flushed file at 1733637156056 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f3d474a: reopening flushed file at 1733637156061 (+5 ms)Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false at 1733637156067 (+6 ms)Writing region close event to WAL at 1733637156068 (+1 ms)Closed at 1733637156068 2024-12-08T05:52:36,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:36,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:36,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:36,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:36,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:52:36,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39511 is added to blk_1073741830_1006 (size=52990) 2024-12-08T05:52:36,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33893 is added to blk_1073741830_1006 (size=52990) 2024-12-08T05:52:36,072 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T05:52:36,072 INFO [M:0;0d942cb2025d:39283 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:52:36,072 INFO [M:0;0d942cb2025d:39283 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39283 2024-12-08T05:52:36,072 INFO [M:0;0d942cb2025d:39283 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:52:36,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:52:36,173 INFO [M:0;0d942cb2025d:39283 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:52:36,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39283-0x10190a0cd820000, quorum=127.0.0.1:64902, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:52:36,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75e53117{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:52:36,176 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c9d71b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:52:36,176 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:52:36,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@429ea7f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:52:36,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f3146c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,STOPPED} 2024-12-08T05:52:36,178 WARN [BP-1825605769-172.17.0.2-1733637103728 heartbeating to localhost/127.0.0.1:42183 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:52:36,178 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:52:36,178 WARN [BP-1825605769-172.17.0.2-1733637103728 heartbeating to localhost/127.0.0.1:42183 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1825605769-172.17.0.2-1733637103728 (Datanode Uuid bb808f8c-e084-41fe-a6ad-81f5317f75fc) service to localhost/127.0.0.1:42183 2024-12-08T05:52:36,178 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:52:36,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data3/current/BP-1825605769-172.17.0.2-1733637103728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:52:36,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data4/current/BP-1825605769-172.17.0.2-1733637103728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:52:36,179 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:52:36,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f519892{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:52:36,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a68ce5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:52:36,181 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:52:36,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f86104c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:52:36,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b7dc08e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,STOPPED} 2024-12-08T05:52:36,183 WARN [BP-1825605769-172.17.0.2-1733637103728 heartbeating to localhost/127.0.0.1:42183 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:52:36,183 WARN [BP-1825605769-172.17.0.2-1733637103728 heartbeating to localhost/127.0.0.1:42183 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1825605769-172.17.0.2-1733637103728 (Datanode Uuid 6895a8d4-eb37-4f43-80e9-0d2b5d47378c) service to localhost/127.0.0.1:42183 2024-12-08T05:52:36,183 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:52:36,183 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:52:36,183 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data1/current/BP-1825605769-172.17.0.2-1733637103728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:52:36,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/cluster_0e549d27-bad1-24e7-4bcc-f6a4a5c5e724/data/data2/current/BP-1825605769-172.17.0.2-1733637103728 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:52:36,184 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:52:36,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45c003ed{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:52:36,190 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ff1ded9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:52:36,190 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:52:36,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3eb3d465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:52:36,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7255e0a2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir/,STOPPED} 2024-12-08T05:52:36,196 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:52:36,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:52:36,224 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42183 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42183 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/0d942cb2025d:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging 
thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42183 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42183 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42183 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42183 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42183 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42183 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=88 (was 133), ProcessCount=11 (was 11), AvailableMemoryMB=7663 (was 7790) 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=88, ProcessCount=11, AvailableMemoryMB=7663 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.log.dir so I do NOT create it in target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e41e6cf8-613f-7dce-f173-02f0bcf04c58/hadoop.tmp.dir so I do NOT create it in target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79, deleteOnExit=true 2024-12-08T05:52:36,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/test.cache.data in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:52:36,232 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:52:36,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:52:36,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:52:36,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:52:36,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:52:36,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:52:36,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:52:36,245 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:52:36,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:52:36,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:52:36,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:52:36,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:52:36,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:52:36,310 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:52:36,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@751d1cbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:52:36,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@462b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:52:36,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78d0933c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/java.io.tmpdir/jetty-localhost-42281-hadoop-hdfs-3_4_1-tests_jar-_-any-8451750054174170819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:52:36,426 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32a8e311{HTTP/1.1, (http/1.1)}{localhost:42281} 2024-12-08T05:52:36,427 INFO [Time-limited test {}] server.Server(415): Started @236610ms 2024-12-08T05:52:36,439 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:52:36,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:52:36,545 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:52:36,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:52:36,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:52:36,546 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T05:52:36,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14304e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:52:36,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53db1080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:52:36,677 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:52:36,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59287e93{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/java.io.tmpdir/jetty-localhost-46551-hadoop-hdfs-3_4_1-tests_jar-_-any-12308466782381842850/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:52:36,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36e18827{HTTP/1.1, (http/1.1)}{localhost:46551} 2024-12-08T05:52:36,680 INFO [Time-limited test {}] server.Server(415): Started @236864ms 2024-12-08T05:52:36,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:52:36,710 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:52:36,712 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:52:36,713 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:52:36,713 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:52:36,713 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:52:36,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ee1549{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:52:36,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@325d40e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:52:36,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:36,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:36,786 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data2/current/BP-178084469-172.17.0.2-1733637156251/current, will proceed with Du for space computation calculation, 2024-12-08T05:52:36,786 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data1/current/BP-178084469-172.17.0.2-1733637156251/current, will proceed with Du for space computation calculation, 2024-12-08T05:52:36,808 WARN [Thread-1929 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:52:36,810 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd31f401bb0d4c323 with lease ID 0x9e0a95cbf13da065: Processing first storage report for DS-721fe17e-085b-449f-b642-fc8c3a698b57 from datanode DatanodeRegistration(127.0.0.1:39251, datanodeUuid=fcc70864-cafb-49da-b8c0-681325cbab83, infoPort=39661, infoSecurePort=0, ipcPort=35141, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251) 2024-12-08T05:52:36,810 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd31f401bb0d4c323 with lease ID 0x9e0a95cbf13da065: from storage DS-721fe17e-085b-449f-b642-fc8c3a698b57 node DatanodeRegistration(127.0.0.1:39251, datanodeUuid=fcc70864-cafb-49da-b8c0-681325cbab83, infoPort=39661, infoSecurePort=0, ipcPort=35141, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:52:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd31f401bb0d4c323 with lease ID 0x9e0a95cbf13da065: Processing first storage report for DS-bfc87b8e-ee89-4c96-a218-959f23c865e1 from datanode DatanodeRegistration(127.0.0.1:39251, datanodeUuid=fcc70864-cafb-49da-b8c0-681325cbab83, infoPort=39661, infoSecurePort=0, ipcPort=35141, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251) 2024-12-08T05:52:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd31f401bb0d4c323 with lease ID 0x9e0a95cbf13da065: from storage DS-bfc87b8e-ee89-4c96-a218-959f23c865e1 node DatanodeRegistration(127.0.0.1:39251, datanodeUuid=fcc70864-cafb-49da-b8c0-681325cbab83, infoPort=39661, infoSecurePort=0, ipcPort=35141, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:52:36,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ca112a4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/java.io.tmpdir/jetty-localhost-39159-hadoop-hdfs-3_4_1-tests_jar-_-any-11491951653001375120/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:52:36,846 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cb0a7d3{HTTP/1.1, (http/1.1)}{localhost:39159} 2024-12-08T05:52:36,846 INFO [Time-limited test {}] server.Server(415): Started @237030ms 2024-12-08T05:52:36,847 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
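The Close-WAL-Writer-0 warnings earlier in this section come from RecoverLeaseFSUtils invoking DFSClient.isFileClosed reflectively after the test's FileSystem handle has already been shut down, which is why every attempt ends in "java.io.IOException: Filesystem closed". A minimal Java sketch of the underlying HDFS calls is below; the NameNode URI matches the log, the WAL path is a placeholder, and this is only an illustration of the recover-then-poll pattern, not the HBase implementation (which goes through reflection and additional retry/timeout logic).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI taken from the log; the WAL path below is a hypothetical placeholder.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46561"), conf);
    Path wal = new Path("/user/jenkins/test-data/example-wal");
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to start lease recovery; returns true if the file is already closed.
      boolean closed = dfs.recoverLease(wal);
      // Poll until the lease is released. If the FileSystem has been closed in the meantime,
      // isFileClosed throws IOException("Filesystem closed"), which is exactly the failure
      // recorded by the Close-WAL-Writer-0 thread above.
      while (!closed) {
        Thread.sleep(1000);
        closed = dfs.isFileClosed(wal);
      }
    }
  }
}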
2024-12-08T05:52:36,931 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data3/current/BP-178084469-172.17.0.2-1733637156251/current, will proceed with Du for space computation calculation, 2024-12-08T05:52:36,931 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data4/current/BP-178084469-172.17.0.2-1733637156251/current, will proceed with Du for space computation calculation, 2024-12-08T05:52:36,947 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T05:52:36,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64d97460cf3aa8c with lease ID 0x9e0a95cbf13da066: Processing first storage report for DS-758f31c6-cf48-4d3a-a56d-efeb8189992b from datanode DatanodeRegistration(127.0.0.1:41373, datanodeUuid=670e7c3a-b28b-42f1-b4f1-a1816b844a65, infoPort=36391, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251) 2024-12-08T05:52:36,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64d97460cf3aa8c with lease ID 0x9e0a95cbf13da066: from storage DS-758f31c6-cf48-4d3a-a56d-efeb8189992b node DatanodeRegistration(127.0.0.1:41373, datanodeUuid=670e7c3a-b28b-42f1-b4f1-a1816b844a65, infoPort=36391, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:52:36,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64d97460cf3aa8c with lease ID 0x9e0a95cbf13da066: Processing first storage report for DS-9c1e0ba6-c947-48d9-8f9a-5b7fa6dafbac from datanode DatanodeRegistration(127.0.0.1:41373, datanodeUuid=670e7c3a-b28b-42f1-b4f1-a1816b844a65, infoPort=36391, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251) 2024-12-08T05:52:36,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64d97460cf3aa8c with lease ID 0x9e0a95cbf13da066: from storage DS-9c1e0ba6-c947-48d9-8f9a-5b7fa6dafbac node DatanodeRegistration(127.0.0.1:41373, datanodeUuid=670e7c3a-b28b-42f1-b4f1-a1816b844a65, infoPort=36391, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=947988525;c=1733637156251), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:52:36,968 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80 2024-12-08T05:52:36,971 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/zookeeper_0, clientPort=58794, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:52:36,972 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58794 2024-12-08T05:52:36,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:36,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:36,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:52:36,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:52:36,984 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0 with version=8 2024-12-08T05:52:36,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:52:36,987 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:52:36,987 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:52:36,988 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44379 2024-12-08T05:52:36,990 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44379 connecting to ZooKeeper ensemble=127.0.0.1:58794 2024-12-08T05:52:37,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443790x0, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:52:37,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44379-0x10190a19af90000 connected 2024-12-08T05:52:37,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,019 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:52:37,019 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0, hbase.cluster.distributed=false 2024-12-08T05:52:37,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:52:37,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T05:52:37,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44379 2024-12-08T05:52:37,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44379 2024-12-08T05:52:37,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T05:52:37,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44379 2024-12-08T05:52:37,037 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:52:37,037 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:52:37,038 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:52:37,038 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40801 2024-12-08T05:52:37,040 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40801 connecting to ZooKeeper ensemble=127.0.0.1:58794 2024-12-08T05:52:37,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:408010x0, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:52:37,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:408010x0, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:52:37,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40801-0x10190a19af90001 connected 2024-12-08T05:52:37,046 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:52:37,047 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:52:37,047 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:52:37,048 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:52:37,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40801 2024-12-08T05:52:37,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40801 2024-12-08T05:52:37,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40801 2024-12-08T05:52:37,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40801 2024-12-08T05:52:37,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40801 2024-12-08T05:52:37,061 
DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:44379 2024-12-08T05:52:37,061 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:52:37,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:52:37,063 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:52:37,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,065 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:52:37,065 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,44379,1733637156986 from backup master directory 2024-12-08T05:52:37,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:52:37,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:52:37,068 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
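The ZKWatcher/ZKUtil entries above show the master and region server placing watches on znodes such as /hbase/master, /hbase/running and /hbase/backup-masters before those nodes exist, then reacting to NodeCreated, NodeDeleted and NodeChildrenChanged events. A small sketch of that watch-then-react pattern with the plain ZooKeeper client follows; the connect string reuses the mini-cluster port from the log, but the watcher logic is illustrative only and is not HBase's ZKWatcher.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the MiniZooKeeperCluster client port seen in the log (58794).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58794", 30000, event -> { /* default watcher */ });

    Watcher masterWatcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", path=" + event.getPath());

    // exists() registers a watch even when the znode does not exist yet, which is what the
    // "Set watcher on znode that does not yet exist" lines record. The watch fires once on
    // NodeCreated or NodeDeleted for that path.
    zk.exists("/hbase/master", masterWatcher);
    zk.exists("/hbase/running", masterWatcher);

    zk.close();
  }
}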
2024-12-08T05:52:37,068 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,072 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/hbase.id] with ID: 57298462-6614-4b10-bc49-8954e10f897c 2024-12-08T05:52:37,072 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/.tmp/hbase.id 2024-12-08T05:52:37,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:52:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:52:37,078 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/.tmp/hbase.id]:[hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/hbase.id] 2024-12-08T05:52:37,088 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,088 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:52:37,089 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
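The two FSUtils entries above record the cluster ID file being written to a temporary location under .tmp and then moved onto its final hbase.id path, so readers never observe a partially written file. A short sketch of that write-then-rename idiom on HDFS follows; the paths are hypothetical and the snippet illustrates the pattern only, not the actual FSUtils code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical locations mirroring the .tmp/hbase.id -> hbase.id move in the log.
    Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");
    Path dst = new Path("/hbase-rootdir/hbase.id");

    // Write the full content to the temporary file first (cluster ID value from the log).
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("57298462-6614-4b10-bc49-8954e10f897c".getBytes(StandardCharsets.UTF_8));
    }
    // On HDFS the rename is atomic, so the final path holds either the whole file or nothing.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}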
2024-12-08T05:52:37,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:52:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:52:37,097 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:52:37,097 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:52:37,098 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:52:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:52:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:52:37,104 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store 2024-12-08T05:52:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:52:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:52:37,110 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:52:37,110 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
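The master:store descriptor dumped above (an in-memory 'info' family with a ROWCOL bloom filter, ROW_INDEX_V1 encoding and an 8 KB block size, plus plain 'proc', 'rs' and 'state' families) is the kind of definition the public client API expresses with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. The sketch below builds equivalent 'info' settings for a hypothetical user table; it is not how the internal master region itself is created.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes from the log: 3 versions, in-memory,
    // ROWCOL bloom filter, ROW_INDEX_V1 data block encoding, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // Hypothetical table name; the region in the log belongs to the internal master:store table.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}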
2024-12-08T05:52:37,110 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637157110Disabling compacts and flushes for region at 1733637157110Disabling writes for close at 1733637157110Writing region close event to WAL at 1733637157110Closed at 1733637157110 2024-12-08T05:52:37,111 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/.initializing 2024-12-08T05:52:37,111 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/WALs/0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,113 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C44379%2C1733637156986, suffix=, logDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/WALs/0d942cb2025d,44379,1733637156986, archiveDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/oldWALs, maxLogs=10 2024-12-08T05:52:37,113 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C44379%2C1733637156986.1733637157113 2024-12-08T05:52:37,118 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/WALs/0d942cb2025d,44379,1733637156986/0d942cb2025d%2C44379%2C1733637156986.1733637157113 2024-12-08T05:52:37,119 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:39661:39661)] 2024-12-08T05:52:37,119 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:52:37,120 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:37,120 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,120 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:52:37,122 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:52:37,124 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,124 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:37,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:52:37,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:37,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:52:37,127 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,127 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:37,127 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,128 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,128 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,129 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,129 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,130 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:52:37,130 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:52:37,132 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:52:37,133 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777962, jitterRate=-0.010770216584205627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:52:37,133 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637157120Initializing all the Stores at 1733637157121 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157121Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637157121Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637157121Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637157121Cleaning up temporary data from old regions at 1733637157129 (+8 ms)Region opened successfully at 1733637157133 (+4 ms) 2024-12-08T05:52:37,133 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:52:37,136 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25577431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:52:37,137 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:52:37,137 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:52:37,137 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:52:37,138 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:52:37,138 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:52:37,138 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:52:37,138 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:52:37,140 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:52:37,141 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:52:37,142 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:52:37,142 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:52:37,143 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:52:37,144 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:52:37,144 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:52:37,145 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:52:37,147 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:52:37,148 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:52:37,149 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:52:37,150 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:52:37,151 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:52:37,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:52:37,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:52:37,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,153 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,44379,1733637156986, sessionid=0x10190a19af90000, setting cluster-up flag (Was=false) 2024-12-08T05:52:37,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,161 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:52:37,162 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,170 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:52:37,171 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,172 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:52:37,173 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:52:37,173 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:52:37,173 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:52:37,174 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,44379,1733637156986 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:52:37,175 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637187176 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,176 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:52:37,177 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:52:37,177 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:52:37,177 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637157177,5,FailOnTimeoutGroup] 2024-12-08T05:52:37,177 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637157177,5,FailOnTimeoutGroup] 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,177 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,178 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,178 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:52:37,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:52:37,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:52:37,189 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:52:37,189 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0 2024-12-08T05:52:37,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:52:37,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:52:37,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:37,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:52:37,200 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:52:37,200 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:52:37,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:52:37,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:52:37,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:52:37,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:52:37,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:52:37,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:52:37,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740 2024-12-08T05:52:37,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740 2024-12-08T05:52:37,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:52:37,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:52:37,208 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
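[editor's note] The entries above show the hbase:meta table descriptor being written and its four families (info, ns, rep_barrier, table) opened with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and an 8 KB block size, followed by FlushLargeStoresPolicy falling back to "memstore flush heap size / number of families" (16 MB here) because no hbase.hregion.percolumnfamilyflush.size.lower.bound is set. As a minimal, purely illustrative sketch (not the test's code; the table name "demo" is hypothetical), a family configured like the logged 'info' family can be expressed through the public client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Illustrative only: a family shaped like the 'info' family in the log above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))            // hypothetical table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                            // VERSIONS => '3'
                .setInMemory(true)                            // IN_MEMORY => 'true'
                .setBlocksize(8192)                           // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)         // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .build();
        System.out.println(td);
      }
    }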
2024-12-08T05:52:37,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:52:37,212 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:52:37,212 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830650, jitterRate=0.05622659623622894}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637157198Initializing all the Stores at 1733637157199 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157199Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157199Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637157199Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157199Cleaning up temporary data from old regions at 1733637157208 (+9 ms)Region opened successfully at 1733637157213 (+5 ms) 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:52:37,213 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:52:37,213 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:52:37,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637157213Disabling compacts and flushes for region at 1733637157213Disabling writes for close at 1733637157213Writing region close 
event to WAL at 1733637157213Closed at 1733637157213 2024-12-08T05:52:37,214 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:52:37,214 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:52:37,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:52:37,216 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:52:37,217 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:52:37,251 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(746): ClusterId : 57298462-6614-4b10-bc49-8954e10f897c 2024-12-08T05:52:37,251 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:52:37,253 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:52:37,253 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:52:37,254 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:52:37,255 DEBUG [RS:0;0d942cb2025d:40801 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78dc2200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:52:37,266 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:40801 2024-12-08T05:52:37,266 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:52:37,266 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:52:37,266 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(832): About to register with Master. 
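[editor's note] The "Opened 1588230740; next sequenceid=2; SteppingSplitPolicy..." entry above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830650, jitterRate=0.05622659623622894}. The desired size is, roughly, the configured region max file size plus that size times the logged jitterRate; the numbers are consistent with the 786432-byte hbase.hregion.max.filesize that the TableDescriptorChecker warning near the end of this section reports. A minimal arithmetic sketch under that assumption (the formula is inferred from the logged values, not quoted from the source):

    public class SplitJitterSketch {
      public static void main(String[] args) {
        long configuredMaxFileSize = 786_432L;     // assumed from the MAX_FILESIZE warning below
        double jitterRate = 0.05622659623622894;   // from the "Opened 1588230740" entry above
        long desired = configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        System.out.println(desired);               // ~830650, matching desiredMaxFileSize in the log
      }
    }

The same arithmetic with the second open's jitterRate of -0.03204633295536041 gives ~761230, matching the later "Opened 1588230740" entry in this section.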
2024-12-08T05:52:37,267 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,44379,1733637156986 with port=40801, startcode=1733637157037 2024-12-08T05:52:37,267 DEBUG [RS:0;0d942cb2025d:40801 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:52:37,269 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43611, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:52:37,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44379 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44379 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,271 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0 2024-12-08T05:52:37,271 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45689 2024-12-08T05:52:37,271 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:52:37,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:52:37,274 DEBUG [RS:0;0d942cb2025d:40801 {}] zookeeper.ZKUtil(111): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,274 WARN [RS:0;0d942cb2025d:40801 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:52:37,274 INFO [RS:0;0d942cb2025d:40801 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:52:37,274 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,274 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,40801,1733637157037] 2024-12-08T05:52:37,278 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:52:37,280 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:52:37,280 INFO [RS:0;0d942cb2025d:40801 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:52:37,280 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
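[editor's note] The comma-separated identities in these entries ("0d942cb2025d,40801,1733637157037", "0d942cb2025d,44379,1733637156986") are hostname,port,startcode triples, and the MemStoreFlusher line reports a low-water mark of 836 M against an 880 M global limit, i.e. the limit scaled by 0.95 (the default hbase.regionserver.global.memstore.size.lower.limit). A small sketch of both, for illustration only:

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameSketch {
      public static void main(String[] args) {
        // hostname, port, startcode -> the "0d942cb2025d,40801,1733637157037" form seen in the log
        ServerName rs = ServerName.valueOf("0d942cb2025d", 40801, 1733637157037L);
        System.out.println(rs);

        // globalMemStoreLimitLowMark = globalMemStoreLimit * lower-limit factor (0.95 by default)
        long globalLimitMb = 880;
        System.out.println((long) (globalLimitMb * 0.95));  // 836, as logged
      }
    }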
2024-12-08T05:52:37,281 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:52:37,281 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:52:37,282 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:52:37,282 DEBUG [RS:0;0d942cb2025d:40801 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
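[editor's note] The executor.ExecutorService entries here (and in the master startup earlier) all report corePoolSize equal to maxPoolSize: each event type gets its own fixed-size pool. As a rough JDK analogy only (this is not HBase's org.apache.hadoop.hbase.executor.ExecutorService class), a fixed pool of one thread per event type looks like:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class FixedPoolSketch {
      public static void main(String[] args) {
        // Analogous to "Starting executor service name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1"
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1);
        openRegionPool.submit(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
      }
    }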
2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,283 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40801,1733637157037-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:52:37,297 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:52:37,297 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,40801,1733637157037-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,297 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,297 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.Replication(171): 0d942cb2025d,40801,1733637157037 started 2024-12-08T05:52:37,310 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,310 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,40801,1733637157037, RpcServer on 0d942cb2025d/172.17.0.2:40801, sessionid=0x10190a19af90001 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,40801,1733637157037' 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,40801,1733637157037' 2024-12-08T05:52:37,311 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:52:37,312 DEBUG 
[RS:0;0d942cb2025d:40801 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:52:37,312 DEBUG [RS:0;0d942cb2025d:40801 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:52:37,312 INFO [RS:0;0d942cb2025d:40801 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:52:37,312 INFO [RS:0;0d942cb2025d:40801 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:52:37,367 WARN [0d942cb2025d:44379 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T05:52:37,414 INFO [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C40801%2C1733637157037, suffix=, logDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037, archiveDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs, maxLogs=32 2024-12-08T05:52:37,414 INFO [RS:0;0d942cb2025d:40801 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40801%2C1733637157037.1733637157414 2024-12-08T05:52:37,420 INFO [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637157414 2024-12-08T05:52:37,420 DEBUG [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39661:39661),(127.0.0.1/127.0.0.1:36391:36391)] 2024-12-08T05:52:37,617 DEBUG [0d942cb2025d:44379 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:52:37,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,619 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,40801,1733637157037, state=OPENING 2024-12-08T05:52:37,620 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:52:37,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:52:37,622 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:52:37,622 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:52:37,622 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:52:37,622 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,40801,1733637157037}] 2024-12-08T05:52:37,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:37,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:37,775 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:52:37,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35705, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:52:37,780 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:52:37,780 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:52:37,782 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C40801%2C1733637157037.meta, suffix=.meta, logDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037, archiveDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs, maxLogs=32 2024-12-08T05:52:37,782 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40801%2C1733637157037.meta.1733637157782.meta 2024-12-08T05:52:37,787 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.meta.1733637157782.meta 2024-12-08T05:52:37,788 DEBUG 
[RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36391:36391),(127.0.0.1/127.0.0.1:39661:39661)] 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:52:37,789 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:52:37,789 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:52:37,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:52:37,791 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:52:37,791 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:52:37,792 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:52:37,792 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:52:37,793 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:52:37,793 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:52:37,794 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:52:37,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:52:37,795 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:52:37,795 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740 2024-12-08T05:52:37,796 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740 2024-12-08T05:52:37,797 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:52:37,797 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:52:37,798 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
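[editor's note] The two RecoverLeaseFSUtils WARN stack traces a few entries earlier ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") appear to come from WAL lease recovery against a previously shut-down mini-cluster (hdfs://localhost:46561, a different NameNode than the 45689 used here) whose DFS client is already closed. Because RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed reflectively, the real IOException surfaces wrapped in an InvocationTargetException, which is exactly what the log shows. A minimal, self-contained sketch of that wrapping behaviour (the method here is a stand-in, not the HDFS API):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class InvocationTargetSketch {
      // Stand-in for DistributedFileSystem.isFileClosed, which threw "Filesystem closed" in the log.
      static boolean isFileClosed(String path) throws java.io.IOException {
        throw new java.io.IOException("Filesystem closed");
      }

      public static void main(String[] args) throws Exception {
        Method m = InvocationTargetSketch.class.getDeclaredMethod("isFileClosed", String.class);
        try {
          m.invoke(null, "/some/wal/path");   // hypothetical path
        } catch (InvocationTargetException e) {
          // The reflective wrapper is what RecoverLeaseFSUtils logs; the real cause is nested inside.
          System.out.println(e.getCause());   // java.io.IOException: Filesystem closed
        }
      }
    }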
2024-12-08T05:52:37,799 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:52:37,800 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761230, jitterRate=-0.03204633295536041}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:52:37,800 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:52:37,800 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637157790Writing region info on filesystem at 1733637157790Initializing all the Stores at 1733637157790Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157790Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157790Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637157790Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637157790Cleaning up temporary data from old regions at 1733637157797 (+7 ms)Running coprocessor post-open hooks at 1733637157800 (+3 ms)Region opened successfully at 1733637157800 2024-12-08T05:52:37,801 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637157775 2024-12-08T05:52:37,803 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:52:37,803 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:52:37,804 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,805 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,40801,1733637157037, state=OPEN 2024-12-08T05:52:37,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:52:37,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:52:37,817 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:37,817 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:52:37,817 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:52:37,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:52:37,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,40801,1733637157037 in 195 msec 2024-12-08T05:52:37,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:52:37,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-12-08T05:52:37,823 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:52:37,823 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:52:37,825 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:52:37,825 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,40801,1733637157037, seqNum=-1] 2024-12-08T05:52:37,825 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:52:37,826 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59625, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:52:37,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 657 msec 2024-12-08T05:52:37,831 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637157831, completionTime=-1 2024-12-08T05:52:37,831 INFO 
[master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:52:37,831 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637217833 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637277833 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,833 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,834 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:44379, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,834 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,834 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,835 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.769sec 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
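[editor's note] Earlier in this section InitMetaProcedure logs "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces"; those are the two built-in namespaces the master bootstraps before declaring initialization complete. For illustration only (the master does this internally, not through the client API), the same descriptor form can be built and inspected like so, with "demo_ns" being a hypothetical user namespace:

    import org.apache.hadoop.hbase.NamespaceDescriptor;

    public class NamespaceSketch {
      public static void main(String[] args) {
        // The built-in namespaces bootstrapped by InitMetaProcedure:
        System.out.println(NamespaceDescriptor.DEFAULT_NAMESPACE);  // {NAME => 'default'}
        System.out.println(NamespaceDescriptor.SYSTEM_NAMESPACE);   // {NAME => 'hbase'}

        // A user-defined namespace is described the same way (name plus optional configuration):
        NamespaceDescriptor ns = NamespaceDescriptor.create("demo_ns").build();
        System.out.println(ns);
      }
    }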
2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:52:37,837 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:52:37,838 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:52:37,840 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:52:37,840 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:52:37,840 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,44379,1733637156986-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:52:37,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ce23f2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:52:37,851 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,44379,-1 for getting cluster id 2024-12-08T05:52:37,851 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:52:37,853 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '57298462-6614-4b10-bc49-8954e10f897c' 2024-12-08T05:52:37,853 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:52:37,853 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "57298462-6614-4b10-bc49-8954e10f897c" 2024-12-08T05:52:37,853 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@533a12fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:52:37,853 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,44379,-1] 2024-12-08T05:52:37,853 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:52:37,854 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:52:37,854 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:52:37,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13fc1909, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:52:37,856 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:52:37,856 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,40801,1733637157037, seqNum=-1] 2024-12-08T05:52:37,857 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:52:37,858 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:52:37,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:52:37,861 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:52:37,862 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-08T05:52:37,862 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 0d942cb2025d,44379,1733637156986 2024-12-08T05:52:37,862 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@19dff15b 2024-12-08T05:52:37,863 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T05:52:37,863 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T05:52:37,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-08T05:52:37,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-08T05:52:37,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:52:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-08T05:52:37,867 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T05:52:37,867 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:37,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-08T05:52:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:52:37,868 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T05:52:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741835_1011 (size=381) 2024-12-08T05:52:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741835_1011 (size=381) 2024-12-08T05:52:38,277 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 438a8117f857d231782152090e27cd27, NAME => 'TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0 2024-12-08T05:52:38,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741836_1012 (size=64) 2024-12-08T05:52:38,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741836_1012 (size=64) 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 438a8117f857d231782152090e27cd27, disabling compactions & flushes 2024-12-08T05:52:38,284 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. after waiting 0 ms 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:38,284 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:38,284 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 438a8117f857d231782152090e27cd27: Waiting for close lock at 1733637158284Disabling compacts and flushes for region at 1733637158284Disabling writes for close at 1733637158284Writing region close event to WAL at 1733637158284Closed at 1733637158284 2024-12-08T05:52:38,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T05:52:38,286 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733637158285"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637158285"}]},"ts":"1733637158285"} 2024-12-08T05:52:38,288 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-08T05:52:38,289 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T05:52:38,289 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637158289"}]},"ts":"1733637158289"} 2024-12-08T05:52:38,290 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-08T05:52:38,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, ASSIGN}] 2024-12-08T05:52:38,292 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, ASSIGN 2024-12-08T05:52:38,293 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, ASSIGN; state=OFFLINE, location=0d942cb2025d,40801,1733637157037; forceNewPlan=false, retain=false 2024-12-08T05:52:38,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=438a8117f857d231782152090e27cd27, regionState=OPENING, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:38,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, ASSIGN because future has completed 2024-12-08T05:52:38,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037}] 2024-12-08T05:52:38,603 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
2024-12-08T05:52:38,603 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 438a8117f857d231782152090e27cd27, NAME => 'TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:52:38,604 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,604 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:38,604 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,604 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,605 INFO [StoreOpener-438a8117f857d231782152090e27cd27-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,606 INFO [StoreOpener-438a8117f857d231782152090e27cd27-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 438a8117f857d231782152090e27cd27 columnFamilyName info 2024-12-08T05:52:38,606 DEBUG [StoreOpener-438a8117f857d231782152090e27cd27-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:38,607 INFO [StoreOpener-438a8117f857d231782152090e27cd27-1 {}] regionserver.HStore(327): Store=438a8117f857d231782152090e27cd27/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:38,607 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,608 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,608 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,608 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,608 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,610 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,611 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:52:38,612 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 438a8117f857d231782152090e27cd27; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734158, jitterRate=-0.06647002696990967}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:52:38,612 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 438a8117f857d231782152090e27cd27 2024-12-08T05:52:38,612 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 438a8117f857d231782152090e27cd27: Running coprocessor pre-open hook at 1733637158604Writing region info on filesystem at 1733637158604Initializing all the Stores at 1733637158605 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637158605Cleaning up temporary data from old regions at 1733637158608 (+3 ms)Running coprocessor post-open hooks at 1733637158612 (+4 ms)Region opened successfully at 1733637158612 2024-12-08T05:52:38,613 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., pid=6, masterSystemTime=1733637158599 2024-12-08T05:52:38,615 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
2024-12-08T05:52:38,615 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:38,616 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=438a8117f857d231782152090e27cd27, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:38,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037 because future has completed 2024-12-08T05:52:38,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T05:52:38,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037 in 172 msec 2024-12-08T05:52:38,624 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T05:52:38,624 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, ASSIGN in 330 msec 2024-12-08T05:52:38,625 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T05:52:38,625 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733637158625"}]},"ts":"1733637158625"} 2024-12-08T05:52:38,627 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-08T05:52:38,627 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T05:52:38,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 763 msec 2024-12-08T05:52:38,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:38,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:39,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:39,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:40,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:40,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:40,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,280 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:52:41,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:41,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:41,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:42,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:42,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:43,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T05:52:43,278 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-08T05:52:43,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:43,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:44,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:44,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:45,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-08T05:52:45,648 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-08T05:52:45,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-08T05:52:45,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:45,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:46,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:46,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:47,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:47,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:47,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44379 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-08T05:52:47,937 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-08T05:52:47,937 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-08T05:52:47,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-08T05:52:47,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:47,942 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., hostname=0d942cb2025d,40801,1733637157037, seqNum=2] 2024-12-08T05:52:47,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 438a8117f857d231782152090e27cd27 2024-12-08T05:52:47,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:52:47,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/c65627ecfc7347b0a03576d8499ba738 is 1080, key is row0001/info:/1733637167943/Put/seqid=0 2024-12-08T05:52:47,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741837_1013 (size=12509) 2024-12-08T05:52:47,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741837_1013 (size=12509) 2024-12-08T05:52:47,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/c65627ecfc7347b0a03576d8499ba738 2024-12-08T05:52:47,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/c65627ecfc7347b0a03576d8499ba738 as 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738 2024-12-08T05:52:47,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738, entries=7, sequenceid=11, filesize=12.2 K 2024-12-08T05:52:47,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 438a8117f857d231782152090e27cd27 in 35ms, sequenceid=11, compaction requested=false 2024-12-08T05:52:47,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:47,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 438a8117f857d231782152090e27cd27 2024-12-08T05:52:47,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-08T05:52:47,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d84d063b80534c978e2beafaffbd8071 is 1080, key is row0008/info:/1733637167956/Put/seqid=0 2024-12-08T05:52:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741838_1014 (size=29761) 2024-12-08T05:52:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741838_1014 (size=29761) 2024-12-08T05:52:48,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d84d063b80534c978e2beafaffbd8071 2024-12-08T05:52:48,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d84d063b80534c978e2beafaffbd8071 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 2024-12-08T05:52:48,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071, entries=23, sequenceid=37, filesize=29.1 K 2024-12-08T05:52:48,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 438a8117f857d231782152090e27cd27 in 18ms, sequenceid=37, compaction requested=false 2024-12-08T05:52:48,009 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:48,009 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-08T05:52:48,009 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:48,009 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 because midkey is the same as first or last row 2024-12-08T05:52:48,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:48,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:49,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:49,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:52:50,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/662d531b9a624f7fb6b006fb11593a15 is 1080, key is row0031/info:/1733637167992/Put/seqid=0 2024-12-08T05:52:50,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741839_1015 (size=12509) 2024-12-08T05:52:50,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741839_1015 (size=12509) 2024-12-08T05:52:50,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/662d531b9a624f7fb6b006fb11593a15 2024-12-08T05:52:50,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/662d531b9a624f7fb6b006fb11593a15 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15 2024-12-08T05:52:50,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15, entries=7, sequenceid=47, filesize=12.2 K 2024-12-08T05:52:50,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 438a8117f857d231782152090e27cd27 in 23ms, sequenceid=47, compaction requested=true 2024-12-08T05:52:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 because midkey is the same as first or last row 2024-12-08T05:52:50,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 438a8117f857d231782152090e27cd27:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-08T05:52:50,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,027 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:52:50,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T05:52:50,028 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:52:50,028 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 438a8117f857d231782152090e27cd27/info is initiating minor compaction (all files) 2024-12-08T05:52:50,028 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 438a8117f857d231782152090e27cd27/info in TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:50,029 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp, totalSize=53.5 K 2024-12-08T05:52:50,029 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting c65627ecfc7347b0a03576d8499ba738, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733637167943 2024-12-08T05:52:50,030 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting d84d063b80534c978e2beafaffbd8071, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733637167956 2024-12-08T05:52:50,030 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 662d531b9a624f7fb6b006fb11593a15, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733637167992 2024-12-08T05:52:50,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 is 1080, key is row0038/info:/1733637170004/Put/seqid=0 
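The ConstantSizeRegionSplitPolicy / StoreUtils entries just above report the two gates that keep this region from splitting yet: the store total (sumSize=53.5 K) is over the sizeToCheck threshold (16.0 K), but the midkey of the largest file equals the first or last row, so there is no usable split point. The Java fragment below is only an illustrative sketch of that decision with invented names; it is not the HBase source.

    // Illustrative sketch of the split gate reported above; class and method names are invented.
    import java.util.Arrays;

    final class SplitGateSketch {
        /** True only when the store is big enough AND the midkey is a usable split point. */
        static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                                   byte[] midKey, byte[] firstKey, byte[] lastKey) {
            boolean bigEnough = sumSizeBytes > sizeToCheckBytes;        // 54779 B (~53.5 K) > 16384 B (16.0 K)
            boolean midKeyUsable = midKey != null
                    && !Arrays.equals(midKey, firstKey)
                    && !Arrays.equals(midKey, lastKey);                 // "midkey is the same as first or last row" -> unusable
            return bigEnough && midKeyUsable;
        }

        public static void main(String[] args) {
            byte[] first = "row0001".getBytes();
            byte[] last = "lastRow".getBytes();                         // placeholder value for the sketch
            // With the midkey equal to the first row, the gate stays closed even though the size check passes.
            System.out.println(shouldSplit(54_779L, 16_384L, first, first, last));  // prints false
        }
    }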
2024-12-08T05:52:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741840_1016 (size=20064) 2024-12-08T05:52:50,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741840_1016 (size=20064) 2024-12-08T05:52:50,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 2024-12-08T05:52:50,045 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 438a8117f857d231782152090e27cd27#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:52:50,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 2024-12-08T05:52:50,045 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/f73979f5e03a4af9ba0cf20858600345 is 1080, key is row0001/info:/1733637167943/Put/seqid=0 2024-12-08T05:52:50,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3, entries=14, sequenceid=64, filesize=19.6 K 2024-12-08T05:52:50,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 438a8117f857d231782152090e27cd27 in 24ms, sequenceid=64, compaction requested=false 2024-12-08T05:52:50,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:50,053 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,053 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,053 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 because midkey is the same as first or last row 2024-12-08T05:52:50,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,054 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T05:52:50,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741841_1017 (size=44978) 2024-12-08T05:52:50,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741841_1017 (size=44978) 2024-12-08T05:52:50,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/3916034d45134d7a988517e2b7a81b12 is 1080, key is row0052/info:/1733637170029/Put/seqid=0 2024-12-08T05:52:50,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741842_1018 (size=17894) 2024-12-08T05:52:50,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741842_1018 (size=17894) 2024-12-08T05:52:50,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/3916034d45134d7a988517e2b7a81b12 2024-12-08T05:52:50,066 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/f73979f5e03a4af9ba0cf20858600345 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 2024-12-08T05:52:50,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/3916034d45134d7a988517e2b7a81b12 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12 2024-12-08T05:52:50,074 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 438a8117f857d231782152090e27cd27/info of 438a8117f857d231782152090e27cd27 into f73979f5e03a4af9ba0cf20858600345(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
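As a quick, purely illustrative cross-check of the byte counts in the surrounding entries (nothing here comes from the test code itself): the three flushed HFiles of 12509, 29761 and 12509 bytes sum to the 54779 bytes the ExploringCompactionPolicy entry selects, and the 44978-byte compacted output plus the 20064-byte flush file account for the 63.5 K total store size reported above.

    // Hypothetical arithmetic check; the constants are copied from the addStoredBlock entries in this log.
    final class StoreSizeCheck {
        public static void main(String[] args) {
            long[] compactionInputs = {12_509L, 29_761L, 12_509L};   // c65627..., d84d06..., 662d53...
            long selected = java.util.Arrays.stream(compactionInputs).sum();
            System.out.println(selected);                            // 54779, matching "3 files of size 54779"
            long storeAfter = 44_978L + 20_064L;                     // f73979... output + 8e95cc... flush
            System.out.printf("%.1f K%n", storeAfter / 1024.0);      // ~63.5 K, matching "total size for store is 63.5 K"
        }
    }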
2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:50,074 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., storeName=438a8117f857d231782152090e27cd27/info, priority=13, startTime=1733637170027; duration=0sec 2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 because midkey is the same as first or last row 2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,074 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 because midkey is the same as first or last row 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 because midkey is the same as first or last row 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,075 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 438a8117f857d231782152090e27cd27:info 2024-12-08T05:52:50,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12, entries=12, sequenceid=79, filesize=17.5 K 2024-12-08T05:52:50,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished 
flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 438a8117f857d231782152090e27cd27 in 24ms, sequenceid=79, compaction requested=true 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 because midkey is the same as first or last row 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 438a8117f857d231782152090e27cd27:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:52:50,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,078 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:52:50,079 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82936 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:52:50,079 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 438a8117f857d231782152090e27cd27/info is initiating minor compaction (all files) 2024-12-08T05:52:50,079 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 438a8117f857d231782152090e27cd27/info in TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
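Entries like the split-policy and flush lines above are easy to mine when following how the store grows over this run. The helper below is a hypothetical sketch (invented class name and regex, not part of HBase or the test) that pulls the sumSize/sizeToCheck pair out of one such line.

    // Hypothetical log-scraping sketch for the "sumSize=... K, sizeToCheck=... K" fragment seen above.
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class SumSizeScraper {
        private static final Pattern SUM_SIZE =
                Pattern.compile("sumSize=([0-9.]+) K, sizeToCheck=([0-9.]+) K");

        public static void main(String[] args) {
            String line = "regionserver.ConstantSizeRegionSplitPolicy(101): Should split because "
                    + "region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K";
            Matcher m = SUM_SIZE.matcher(line);
            if (m.find()) {
                System.out.println("store=" + m.group(1) + " K, threshold=" + m.group(2) + " K");
            }
        }
    }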
2024-12-08T05:52:50,079 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp, totalSize=81.0 K 2024-12-08T05:52:50,079 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting f73979f5e03a4af9ba0cf20858600345, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733637167943 2024-12-08T05:52:50,080 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e95cc2b9cbe4e0dbc78f6698acd65f3, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1733637170004 2024-12-08T05:52:50,080 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3916034d45134d7a988517e2b7a81b12, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733637170029 2024-12-08T05:52:50,091 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 438a8117f857d231782152090e27cd27#info#compaction#60 average throughput is 32.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:52:50,092 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/fe9a01eb86a94ef08c3c3bb52db23e04 is 1080, key is row0001/info:/1733637167943/Put/seqid=0 2024-12-08T05:52:50,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741843_1019 (size=73224) 2024-12-08T05:52:50,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741843_1019 (size=73224) 2024-12-08T05:52:50,102 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/fe9a01eb86a94ef08c3c3bb52db23e04 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04 2024-12-08T05:52:50,107 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 438a8117f857d231782152090e27cd27/info of 438a8117f857d231782152090e27cd27 into fe9a01eb86a94ef08c3c3bb52db23e04(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:52:50,107 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 438a8117f857d231782152090e27cd27: 2024-12-08T05:52:50,107 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., storeName=438a8117f857d231782152090e27cd27/info, priority=13, startTime=1733637170078; duration=0sec 2024-12-08T05:52:50,107 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,107 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,108 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,108 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,108 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-08T05:52:50,108 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-08T05:52:50,109 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,109 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,109 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 438a8117f857d231782152090e27cd27:info 2024-12-08T05:52:50,110 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44379 {}] assignment.AssignmentManager(1363): Split request from 0d942cb2025d,40801,1733637157037, parent={ENCODED => 438a8117f857d231782152090e27cd27, NAME => 'TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-08T05:52:50,115 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44379 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,118 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44379 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=438a8117f857d231782152090e27cd27, daughterA=c1e62187e6586f93a537f3e69fd81df4, daughterB=73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,119 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=438a8117f857d231782152090e27cd27, daughterA=c1e62187e6586f93a537f3e69fd81df4, daughterB=73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,120 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=438a8117f857d231782152090e27cd27, daughterA=c1e62187e6586f93a537f3e69fd81df4, daughterB=73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,120 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=438a8117f857d231782152090e27cd27, daughterA=c1e62187e6586f93a537f3e69fd81df4, daughterB=73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, UNASSIGN}] 2024-12-08T05:52:50,128 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, UNASSIGN 2024-12-08T05:52:50,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=438a8117f857d231782152090e27cd27, regionState=CLOSING, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, UNASSIGN because future has completed 2024-12-08T05:52:50,132 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-08T05:52:50,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037}] 2024-12-08T05:52:50,290 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,290 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-08T05:52:50,290 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 438a8117f857d231782152090e27cd27, disabling compactions & flushes 2024-12-08T05:52:50,290 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:50,290 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:50,290 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. after waiting 0 ms 2024-12-08T05:52:50,290 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
2024-12-08T05:52:50,291 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 438a8117f857d231782152090e27cd27 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-08T05:52:50,295 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d0fc9c10e23640c89cc09f6d8cbf3851 is 1080, key is row0064/info:/1733637170055/Put/seqid=0 2024-12-08T05:52:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741844_1020 (size=6033) 2024-12-08T05:52:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741844_1020 (size=6033) 2024-12-08T05:52:50,300 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d0fc9c10e23640c89cc09f6d8cbf3851 2024-12-08T05:52:50,305 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/.tmp/info/d0fc9c10e23640c89cc09f6d8cbf3851 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d0fc9c10e23640c89cc09f6d8cbf3851 2024-12-08T05:52:50,309 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d0fc9c10e23640c89cc09f6d8cbf3851, entries=1, sequenceid=85, filesize=5.9 K 2024-12-08T05:52:50,310 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 438a8117f857d231782152090e27cd27 in 20ms, sequenceid=85, compaction requested=false 2024-12-08T05:52:50,311 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345, 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12] to archive 2024-12-08T05:52:50,312 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T05:52:50,313 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/c65627ecfc7347b0a03576d8499ba738 2024-12-08T05:52:50,315 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d84d063b80534c978e2beafaffbd8071 2024-12-08T05:52:50,316 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/f73979f5e03a4af9ba0cf20858600345 2024-12-08T05:52:50,317 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/662d531b9a624f7fb6b006fb11593a15 2024-12-08T05:52:50,318 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 to 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/8e95cc2b9cbe4e0dbc78f6698acd65f3 2024-12-08T05:52:50,319 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/3916034d45134d7a988517e2b7a81b12 2024-12-08T05:52:50,324 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-12-08T05:52:50,325 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 2024-12-08T05:52:50,325 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 438a8117f857d231782152090e27cd27: Waiting for close lock at 1733637170290Running coprocessor pre-close hooks at 1733637170290Disabling compacts and flushes for region at 1733637170290Disabling writes for close at 1733637170290Obtaining lock to block concurrent updates at 1733637170291 (+1 ms)Preparing flush snapshotting stores in 438a8117f857d231782152090e27cd27 at 1733637170291Finished memstore snapshotting TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733637170291Flushing stores of TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
at 1733637170291Flushing 438a8117f857d231782152090e27cd27/info: creating writer at 1733637170292 (+1 ms)Flushing 438a8117f857d231782152090e27cd27/info: appending metadata at 1733637170294 (+2 ms)Flushing 438a8117f857d231782152090e27cd27/info: closing flushed file at 1733637170294Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e7fd8d8: reopening flushed file at 1733637170304 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 438a8117f857d231782152090e27cd27 in 20ms, sequenceid=85, compaction requested=false at 1733637170310 (+6 ms)Writing region close event to WAL at 1733637170321 (+11 ms)Running coprocessor post-close hooks at 1733637170325 (+4 ms)Closed at 1733637170325 2024-12-08T05:52:50,327 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,328 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=438a8117f857d231782152090e27cd27, regionState=CLOSED 2024-12-08T05:52:50,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037 because future has completed 2024-12-08T05:52:50,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-08T05:52:50,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 438a8117f857d231782152090e27cd27, server=0d942cb2025d,40801,1733637157037 in 198 msec 2024-12-08T05:52:50,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-08T05:52:50,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=438a8117f857d231782152090e27cd27, UNASSIGN in 206 msec 2024-12-08T05:52:50,343 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:50,345 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=438a8117f857d231782152090e27cd27, threads=2 2024-12-08T05:52:50,347 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d0fc9c10e23640c89cc09f6d8cbf3851 for region: 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,347 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04 for region: 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,357 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d0fc9c10e23640c89cc09f6d8cbf3851, top=true 2024-12-08T05:52:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741845_1021 (size=27) 2024-12-08T05:52:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741845_1021 (size=27) 2024-12-08T05:52:50,366 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851 for child: 73c59f49873ab33a15eaa1ae94fa7685, parent: 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,367 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/d0fc9c10e23640c89cc09f6d8cbf3851 for region: 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741846_1022 (size=27) 2024-12-08T05:52:50,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741846_1022 (size=27) 2024-12-08T05:52:50,372 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04 for region: 438a8117f857d231782152090e27cd27 2024-12-08T05:52:50,373 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 438a8117f857d231782152090e27cd27 Daughter A: [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27] storefiles, Daughter B: [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27] storefiles. 
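[editor's note] The split bookkeeping above names the daughter store files in two patterns visible in the log: daughter A gets a reference file named "<hfile>.<parentEncodedRegion>", while daughter B gets an HFileLink named "<table>=<parentEncodedRegion>-<hfile>". A small Java sketch that only reproduces those name patterns as string formatting, inferred from the log output itself rather than taken from HBase's real helper classes:

// Illustrative sketch: rebuilds the daughter store-file names seen in the log.
// The formats are inferred from the log lines, not copied from HBase source.
public class SplitFileNameSketch {
    static String referenceFileName(String hfile, String parentEncodedRegion) {
        // e.g. fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27
        return hfile + "." + parentEncodedRegion;
    }

    static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
        // e.g. TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851
        return table + "=" + parentEncodedRegion + "-" + hfile;
    }

    public static void main(String[] args) {
        String parent = "438a8117f857d231782152090e27cd27";
        System.out.println(referenceFileName("fe9a01eb86a94ef08c3c3bb52db23e04", parent));
        System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                "d0fc9c10e23640c89cc09f6d8cbf3851"));
    }
}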
2024-12-08T05:52:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741847_1023 (size=71) 2024-12-08T05:52:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741847_1023 (size=71) 2024-12-08T05:52:50,383 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:50,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741848_1024 (size=71) 2024-12-08T05:52:50,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741848_1024 (size=71) 2024-12-08T05:52:50,394 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:50,402 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-12-08T05:52:50,404 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-12-08T05:52:50,406 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733637170406"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733637170406"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733637170406"}]},"ts":"1733637170406"} 2024-12-08T05:52:50,406 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733637170406"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637170406"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733637170406"}]},"ts":"1733637170406"} 2024-12-08T05:52:50,406 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733637170406"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733637170406"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733637170406"}]},"ts":"1733637170406"} 2024-12-08T05:52:50,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1e62187e6586f93a537f3e69fd81df4, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=73c59f49873ab33a15eaa1ae94fa7685, ASSIGN}] 2024-12-08T05:52:50,425 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1e62187e6586f93a537f3e69fd81df4, ASSIGN 2024-12-08T05:52:50,425 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73c59f49873ab33a15eaa1ae94fa7685, ASSIGN 2024-12-08T05:52:50,426 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1e62187e6586f93a537f3e69fd81df4, ASSIGN; state=SPLITTING_NEW, location=0d942cb2025d,40801,1733637157037; forceNewPlan=false, retain=false 2024-12-08T05:52:50,426 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73c59f49873ab33a15eaa1ae94fa7685, ASSIGN; state=SPLITTING_NEW, location=0d942cb2025d,40801,1733637157037; forceNewPlan=false, retain=false 2024-12-08T05:52:50,576 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=73c59f49873ab33a15eaa1ae94fa7685, regionState=OPENING, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,576 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=c1e62187e6586f93a537f3e69fd81df4, regionState=OPENING, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1e62187e6586f93a537f3e69fd81df4, ASSIGN because future has completed 2024-12-08T05:52:50,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1e62187e6586f93a537f3e69fd81df4, server=0d942cb2025d,40801,1733637157037}] 2024-12-08T05:52:50,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73c59f49873ab33a15eaa1ae94fa7685, ASSIGN because future has completed 2024-12-08T05:52:50,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73c59f49873ab33a15eaa1ae94fa7685, server=0d942cb2025d,40801,1733637157037}] 2024-12-08T05:52:50,735 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 
2024-12-08T05:52:50,735 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => c1e62187e6586f93a537f3e69fd81df4, NAME => 'TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-08T05:52:50,736 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,736 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:50,736 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,736 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,737 INFO [StoreOpener-c1e62187e6586f93a537f3e69fd81df4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,738 INFO [StoreOpener-c1e62187e6586f93a537f3e69fd81df4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1e62187e6586f93a537f3e69fd81df4 columnFamilyName info 2024-12-08T05:52:50,738 DEBUG [StoreOpener-c1e62187e6586f93a537f3e69fd81df4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:50,748 DEBUG [StoreOpener-c1e62187e6586f93a537f3e69fd81df4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-bottom 2024-12-08T05:52:50,749 INFO [StoreOpener-c1e62187e6586f93a537f3e69fd81df4-1 {}] regionserver.HStore(327): Store=c1e62187e6586f93a537f3e69fd81df4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:50,749 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,750 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,751 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,751 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,751 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,753 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,754 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened c1e62187e6586f93a537f3e69fd81df4; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757859, jitterRate=-0.036333709955215454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:52:50,754 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:52:50,754 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for c1e62187e6586f93a537f3e69fd81df4: Running coprocessor pre-open hook at 1733637170736Writing region info on filesystem at 1733637170736Initializing all the Stores at 1733637170737 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637170737Cleaning up temporary data from old regions at 1733637170751 (+14 ms)Running coprocessor post-open hooks at 1733637170754 (+3 ms)Region opened successfully at 1733637170754 2024-12-08T05:52:50,755 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4., pid=12, masterSystemTime=1733637170732 2024-12-08T05:52:50,755 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
c1e62187e6586f93a537f3e69fd81df4:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:52:50,755 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,755 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-08T05:52:50,756 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:52:50,756 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): c1e62187e6586f93a537f3e69fd81df4/info is initiating minor compaction (all files) 2024-12-08T05:52:50,756 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1e62187e6586f93a537f3e69fd81df4/info in TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:52:50,756 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-bottom] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/.tmp, totalSize=71.5 K 2024-12-08T05:52:50,757 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733637167943 2024-12-08T05:52:50,758 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:52:50,758 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:52:50,758 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 
2024-12-08T05:52:50,758 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 73c59f49873ab33a15eaa1ae94fa7685, NAME => 'TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-08T05:52:50,758 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,759 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:52:50,759 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,759 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,759 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=c1e62187e6586f93a537f3e69fd81df4, regionState=OPEN, openSeqNum=89, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,760 INFO [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,761 INFO [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73c59f49873ab33a15eaa1ae94fa7685 columnFamilyName info 2024-12-08T05:52:50,761 DEBUG [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:52:50,761 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-08T05:52:50,761 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-08T05:52:50,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB
2024-12-08T05:52:50,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1e62187e6586f93a537f3e69fd81df4, server=0d942cb2025d,40801,1733637157037 because future has completed
2024-12-08T05:52:50,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T05:52:50,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-08T05:52:50,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10
2024-12-08T05:52:50,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure c1e62187e6586f93a537f3e69fd81df4, server=0d942cb2025d,40801,1733637157037 in 191 msec
2024-12-08T05:52:50,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1e62187e6586f93a537f3e69fd81df4, ASSIGN in 350 msec
2024-12-08T05:52:50,775 DEBUG [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851
2024-12-08T05:52:50,778 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1e62187e6586f93a537f3e69fd81df4#info#compaction#62 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:52:50,779 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/.tmp/info/bba26d9aea2541619b8bac0425f6fbf8 is 1080, key is row0001/info:/1733637167943/Put/seqid=0 2024-12-08T05:52:50,780 DEBUG [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-top 2024-12-08T05:52:50,780 INFO [StoreOpener-73c59f49873ab33a15eaa1ae94fa7685-1 {}] regionserver.HStore(327): Store=73c59f49873ab33a15eaa1ae94fa7685/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:52:50,780 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,781 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,782 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,783 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,783 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,785 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/17306d8d77084e25b326b9484b082489 is 193, key is TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685./info:regioninfo/1733637170576/Put/seqid=0 2024-12-08T05:52:50,786 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 73c59f49873ab33a15eaa1ae94fa7685; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864454, 
jitterRate=0.09921044111251831}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T05:52:50,786 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:52:50,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741849_1025 (size=70862) 2024-12-08T05:52:50,786 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 73c59f49873ab33a15eaa1ae94fa7685: Running coprocessor pre-open hook at 1733637170759Writing region info on filesystem at 1733637170759Initializing all the Stores at 1733637170759Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637170759Cleaning up temporary data from old regions at 1733637170783 (+24 ms)Running coprocessor post-open hooks at 1733637170786 (+3 ms)Region opened successfully at 1733637170786 2024-12-08T05:52:50,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741849_1025 (size=70862) 2024-12-08T05:52:50,787 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., pid=13, masterSystemTime=1733637170732 2024-12-08T05:52:50,787 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 2 2024-12-08T05:52:50,787 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,787 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T05:52:50,788 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:52:50,788 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:52:50,788 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 
2024-12-08T05:52:50,788 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-top, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=77.4 K 2024-12-08T05:52:50,789 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] compactions.Compactor(225): Compacting fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733637167943 2024-12-08T05:52:50,789 DEBUG [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:52:50,790 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733637170055 2024-12-08T05:52:50,790 INFO [RS_OPEN_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 
2024-12-08T05:52:50,790 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=73c59f49873ab33a15eaa1ae94fa7685, regionState=OPEN, openSeqNum=89, regionLocation=0d942cb2025d,40801,1733637157037 2024-12-08T05:52:50,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741850_1026 (size=9847) 2024-12-08T05:52:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741850_1026 (size=9847) 2024-12-08T05:52:50,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73c59f49873ab33a15eaa1ae94fa7685, server=0d942cb2025d,40801,1733637157037 because future has completed 2024-12-08T05:52:50,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/17306d8d77084e25b326b9484b082489 2024-12-08T05:52:50,793 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/.tmp/info/bba26d9aea2541619b8bac0425f6fbf8 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/bba26d9aea2541619b8bac0425f6fbf8 2024-12-08T05:52:50,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-08T05:52:50,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 73c59f49873ab33a15eaa1ae94fa7685, server=0d942cb2025d,40801,1733637157037 in 213 msec 2024-12-08T05:52:50,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-08T05:52:50,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=73c59f49873ab33a15eaa1ae94fa7685, ASSIGN in 374 msec 2024-12-08T05:52:50,801 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in c1e62187e6586f93a537f3e69fd81df4/info of c1e62187e6586f93a537f3e69fd81df4 into bba26d9aea2541619b8bac0425f6fbf8(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T05:52:50,801 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1e62187e6586f93a537f3e69fd81df4: 2024-12-08T05:52:50,801 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4., storeName=c1e62187e6586f93a537f3e69fd81df4/info, priority=15, startTime=1733637170755; duration=0sec 2024-12-08T05:52:50,801 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,801 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1e62187e6586f93a537f3e69fd81df4:info 2024-12-08T05:52:50,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=438a8117f857d231782152090e27cd27, daughterA=c1e62187e6586f93a537f3e69fd81df4, daughterB=73c59f49873ab33a15eaa1ae94fa7685 in 686 msec 2024-12-08T05:52:50,811 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#64 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:52:50,812 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/0f0ed446a47840619a15d2d9f922e9bc is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:52:50,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741851_1027 (size=8359) 2024-12-08T05:52:50,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741851_1027 (size=8359) 2024-12-08T05:52:50,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/ns/9adb915ec70441d19a94f4ff54f7c1e9 is 43, key is default/ns:d/1733637157827/Put/seqid=0 2024-12-08T05:52:50,823 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/0f0ed446a47840619a15d2d9f922e9bc as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0f0ed446a47840619a15d2d9f922e9bc 2024-12-08T05:52:50,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741852_1028 (size=5153) 2024-12-08T05:52:50,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741852_1028 (size=5153) 2024-12-08T05:52:50,826 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/ns/9adb915ec70441d19a94f4ff54f7c1e9 2024-12-08T05:52:50,830 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into 0f0ed446a47840619a15d2d9f922e9bc(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:52:50,830 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:52:50,830 INFO [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=14, startTime=1733637170787; duration=0sec 2024-12-08T05:52:50,830 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:52:50,830 DEBUG [RS:0;0d942cb2025d:40801-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:52:50,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/table/616e747ddab34449a34ceb51659df138 is 65, key is TestLogRolling-testLogRolling/table:state/1733637158625/Put/seqid=0 2024-12-08T05:52:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741853_1029 (size=5340) 2024-12-08T05:52:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741853_1029 (size=5340) 2024-12-08T05:52:50,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/table/616e747ddab34449a34ceb51659df138 2024-12-08T05:52:50,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/17306d8d77084e25b326b9484b082489 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/info/17306d8d77084e25b326b9484b082489 2024-12-08T05:52:50,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/info/17306d8d77084e25b326b9484b082489, entries=30, sequenceid=17, filesize=9.6 K 2024-12-08T05:52:50,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/ns/9adb915ec70441d19a94f4ff54f7c1e9 as 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/ns/9adb915ec70441d19a94f4ff54f7c1e9 2024-12-08T05:52:50,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/ns/9adb915ec70441d19a94f4ff54f7c1e9, entries=2, sequenceid=17, filesize=5.0 K 2024-12-08T05:52:50,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/table/616e747ddab34449a34ceb51659df138 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/table/616e747ddab34449a34ceb51659df138 2024-12-08T05:52:50,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/table/616e747ddab34449a34ceb51659df138, entries=2, sequenceid=17, filesize=5.2 K 2024-12-08T05:52:50,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 110ms, sequenceid=17, compaction requested=false 2024-12-08T05:52:50,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T05:52:51,151 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:52:51,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,152 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:51,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:51,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:52,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37848 deadline: 1733637182058, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. 
is not online on 0d942cb2025d,40801,1733637157037 2024-12-08T05:52:52,081 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., hostname=0d942cb2025d,40801,1733637157037, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., hostname=0d942cb2025d,40801,1733637157037, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. is not online on 0d942cb2025d,40801,1733637157037 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T05:52:52,082 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., hostname=0d942cb2025d,40801,1733637157037, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27. is not online on 0d942cb2025d,40801,1733637157037 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-08T05:52:52,082 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733637157864.438a8117f857d231782152090e27cd27., hostname=0d942cb2025d,40801,1733637157037, seqNum=2 from cache 2024-12-08T05:52:52,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:52,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:53,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:53,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:54,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:54,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:55,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:55,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:56,238 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-08T05:52:56,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,259 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,260 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-08T05:52:56,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:56,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:57,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:57,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:58,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:58,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:52:59,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:52:59,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:00,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:00,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:01,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:01,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:02,143 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., hostname=0d942cb2025d,40801,1733637157037, seqNum=89] 2024-12-08T05:53:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:02,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:53:02,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/5882021201cc4ca0b4cbd23116c2a77f is 1080, key is row0065/info:/1733637182144/Put/seqid=0 2024-12-08T05:53:02,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741854_1030 (size=12509) 2024-12-08T05:53:02,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741854_1030 (size=12509) 2024-12-08T05:53:02,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/5882021201cc4ca0b4cbd23116c2a77f 2024-12-08T05:53:02,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/5882021201cc4ca0b4cbd23116c2a77f as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f 2024-12-08T05:53:02,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f, entries=7, sequenceid=99, filesize=12.2 K 2024-12-08T05:53:02,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 73c59f49873ab33a15eaa1ae94fa7685 in 23ms, sequenceid=99, compaction requested=false 2024-12-08T05:53:02,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:02,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T05:53:02,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/45fb0bdb87ff4589bd88a22a84540a08 is 1080, key is row0072/info:/1733637182155/Put/seqid=0 2024-12-08T05:53:02,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741855_1031 (size=20064) 2024-12-08T05:53:02,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741855_1031 (size=20064) 2024-12-08T05:53:02,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/45fb0bdb87ff4589bd88a22a84540a08 2024-12-08T05:53:02,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/45fb0bdb87ff4589bd88a22a84540a08 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08 2024-12-08T05:53:02,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08, entries=14, sequenceid=116, filesize=19.6 K 2024-12-08T05:53:02,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 73c59f49873ab33a15eaa1ae94fa7685 in 21ms, sequenceid=116, compaction requested=true 2024-12-08T05:53:02,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:02,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 
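The entries above record a memstore flush being written to a .tmp HFile and committed into the store, after which a compaction is requested; the entries that follow show the exploring policy picking 3 store files (about 8.2 K, 12.2 K and 19.6 K, roughly 40 K total) for a minor compaction. The sketch below is a minimal, self-contained illustration of that kind of ratio-based window check only; the class name, the 1.2 ratio and the selection logic are simplified stand-ins and assumptions, not the actual ExploringCompactionPolicy code referenced in the log.

```java
import java.util.Arrays;
import java.util.List;

// Illustrative sketch (hypothetical names): accept a window of store files for a
// minor compaction only if no single file dominates the sum of the others by more
// than a configured ratio. This mirrors the shape of the decision logged above,
// not the real HBase implementation.
public class CompactionSelectionSketch {

    static boolean ratioOk(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            // Reject the window if one file is much larger than the rest combined.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes loosely modeled on the three files the surrounding log entries show
        // being selected (~8.2 K, ~12.2 K, ~19.6 K); the 1.2 ratio is an assumption.
        List<Long> storeFiles = Arrays.asList(8_428L, 12_509L, 20_064L);
        long total = storeFiles.stream().mapToLong(Long::longValue).sum();
        boolean selected = ratioOk(storeFiles, 1.2);
        System.out.printf("select %d files of size %d for minor compaction: %b%n",
            storeFiles.size(), total, selected);
    }
}
```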
2024-12-08T05:53:02,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:02,199 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:02,200 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40932 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:02,200 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:02,200 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:02,200 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0f0ed446a47840619a15d2d9f922e9bc, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=40.0 K 2024-12-08T05:53:02,200 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f0ed446a47840619a15d2d9f922e9bc, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733637170051 2024-12-08T05:53:02,201 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5882021201cc4ca0b4cbd23116c2a77f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733637182144 2024-12-08T05:53:02,201 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 45fb0bdb87ff4589bd88a22a84540a08, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733637182155 2024-12-08T05:53:02,211 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#69 average throughput is 24.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:02,212 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/22e0cc4a98334888ae8e8b6e482d8922 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:02,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741856_1032 (size=31106) 2024-12-08T05:53:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741856_1032 (size=31106) 2024-12-08T05:53:02,223 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/22e0cc4a98334888ae8e8b6e482d8922 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/22e0cc4a98334888ae8e8b6e482d8922 2024-12-08T05:53:02,229 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into 22e0cc4a98334888ae8e8b6e482d8922(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:02,229 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:02,229 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637182199; duration=0sec 2024-12-08T05:53:02,229 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:02,229 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:02,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:02,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:03,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:03,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:04,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-08T05:53:04,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/92a4fe5d2c264bfeadb61095dd6494c0 is 1080, key is row0086/info:/1733637182179/Put/seqid=0 2024-12-08T05:53:04,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741857_1033 (size=17896) 2024-12-08T05:53:04,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741857_1033 (size=17896) 2024-12-08T05:53:04,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/92a4fe5d2c264bfeadb61095dd6494c0 2024-12-08T05:53:04,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/92a4fe5d2c264bfeadb61095dd6494c0 as 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0 2024-12-08T05:53:04,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0, entries=12, sequenceid=132, filesize=17.5 K 2024-12-08T05:53:04,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 73c59f49873ab33a15eaa1ae94fa7685 in 22ms, sequenceid=132, compaction requested=false 2024-12-08T05:53:04,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:04,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:04,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T05:53:04,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/1e4d60424e994ad4a4d3320f1a9afa7e is 1080, key is row0098/info:/1733637184200/Put/seqid=0 2024-12-08T05:53:04,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741858_1034 (size=20078) 2024-12-08T05:53:04,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741858_1034 (size=20078) 2024-12-08T05:53:04,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/1e4d60424e994ad4a4d3320f1a9afa7e 2024-12-08T05:53:04,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/1e4d60424e994ad4a4d3320f1a9afa7e as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e 2024-12-08T05:53:04,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e, entries=14, sequenceid=149, filesize=19.6 K 2024-12-08T05:53:04,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 73c59f49873ab33a15eaa1ae94fa7685 in 21ms, sequenceid=149, compaction requested=true 2024-12-08T05:53:04,243 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:04,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:04,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:04,243 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:04,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T05:53:04,244 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 69080 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:04,244 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:04,245 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:04,245 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/22e0cc4a98334888ae8e8b6e482d8922, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=67.5 K 2024-12-08T05:53:04,245 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22e0cc4a98334888ae8e8b6e482d8922, keycount=24, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733637170051 2024-12-08T05:53:04,245 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 92a4fe5d2c264bfeadb61095dd6494c0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733637182179 2024-12-08T05:53:04,246 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e4d60424e994ad4a4d3320f1a9afa7e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1733637184200 2024-12-08T05:53:04,248 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/65b41467e6424ff4ab293e2390e7186d is 1080, key is row0112/info:/1733637184223/Put/seqid=0 2024-12-08T05:53:04,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741859_1035 (size=19000) 2024-12-08T05:53:04,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741859_1035 (size=19000) 2024-12-08T05:53:04,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/65b41467e6424ff4ab293e2390e7186d 2024-12-08T05:53:04,259 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#73 average throughput is 25.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:04,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/65b41467e6424ff4ab293e2390e7186d as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d 2024-12-08T05:53:04,259 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/97391e3c66474b608c643b6eeadb8f69 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:04,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741860_1036 (size=59266) 2024-12-08T05:53:04,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741860_1036 (size=59266) 2024-12-08T05:53:04,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d, entries=13, sequenceid=165, filesize=18.6 K 2024-12-08T05:53:04,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 73c59f49873ab33a15eaa1ae94fa7685 in 23ms, sequenceid=165, compaction requested=false 2024-12-08T05:53:04,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:04,270 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/97391e3c66474b608c643b6eeadb8f69 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/97391e3c66474b608c643b6eeadb8f69 2024-12-08T05:53:04,275 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into 97391e3c66474b608c643b6eeadb8f69(size=57.9 K), total size for store is 76.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:04,275 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:04,275 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637184243; duration=0sec 2024-12-08T05:53:04,275 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:04,275 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:04,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:04,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:05,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:05,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:06,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:53:06,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/6d0d345b27c0419191db359eceb6b8c4 is 1080, key is row0125/info:/1733637184244/Put/seqid=0 2024-12-08T05:53:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741861_1037 (size=12516) 2024-12-08T05:53:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741861_1037 (size=12516) 2024-12-08T05:53:06,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/6d0d345b27c0419191db359eceb6b8c4 2024-12-08T05:53:06,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/6d0d345b27c0419191db359eceb6b8c4 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4 2024-12-08T05:53:06,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4, entries=7, sequenceid=176, filesize=12.2 K 2024-12-08T05:53:06,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 73c59f49873ab33a15eaa1ae94fa7685 in 21ms, sequenceid=176, compaction requested=true 2024-12-08T05:53:06,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:06,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:06,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:06,277 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:06,278 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 90782 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:06,278 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:06,278 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:06,278 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/97391e3c66474b608c643b6eeadb8f69, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=88.7 K 2024-12-08T05:53:06,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-08T05:53:06,279 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97391e3c66474b608c643b6eeadb8f69, keycount=50, bloomtype=ROW, size=57.9 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1733637170051 2024-12-08T05:53:06,279 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65b41467e6424ff4ab293e2390e7186d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733637184223 2024-12-08T05:53:06,279 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6d0d345b27c0419191db359eceb6b8c4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733637184244 2024-12-08T05:53:06,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c1466d419b324aea8b9bef97e37a74d1 is 1080, key is row0132/info:/1733637186257/Put/seqid=0 2024-12-08T05:53:06,291 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
73c59f49873ab33a15eaa1ae94fa7685#info#compaction#76 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:06,292 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/ca568a06708b448cbb202d389d3c775c is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:06,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741862_1038 (size=21156) 2024-12-08T05:53:06,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741862_1038 (size=21156) 2024-12-08T05:53:06,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c1466d419b324aea8b9bef97e37a74d1 2024-12-08T05:53:06,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c1466d419b324aea8b9bef97e37a74d1 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1 2024-12-08T05:53:06,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1, entries=15, sequenceid=194, filesize=20.7 K 2024-12-08T05:53:06,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for 73c59f49873ab33a15eaa1ae94fa7685 in 28ms, sequenceid=194, compaction requested=false 2024-12-08T05:53:06,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741863_1039 (size=81065) 2024-12-08T05:53:06,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:06,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741863_1039 (size=81065) 2024-12-08T05:53:06,312 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/ca568a06708b448cbb202d389d3c775c as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ca568a06708b448cbb202d389d3c775c 2024-12-08T05:53:06,317 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): 
Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into ca568a06708b448cbb202d389d3c775c(size=79.2 K), total size for store is 99.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:06,317 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:06,317 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637186277; duration=0sec 2024-12-08T05:53:06,317 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:06,318 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:06,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:06,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:06,968 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T05:53:07,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:07,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:08,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-08T05:53:08,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/ae8942df44ca412d944c3130728560d5 is 1080, key is row0147/info:/1733637186279/Put/seqid=0 2024-12-08T05:53:08,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741864_1040 (size=21156) 2024-12-08T05:53:08,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741864_1040 (size=21156) 2024-12-08T05:53:08,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/ae8942df44ca412d944c3130728560d5 2024-12-08T05:53:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/ae8942df44ca412d944c3130728560d5 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5 2024-12-08T05:53:08,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5, entries=15, sequenceid=213, filesize=20.7 K 2024-12-08T05:53:08,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 73c59f49873ab33a15eaa1ae94fa7685 in 23ms, sequenceid=213, compaction requested=true 2024-12-08T05:53:08,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:08,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:08,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:08,328 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:08,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T05:53:08,329 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123377 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:08,329 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:08,330 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:08,330 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ca568a06708b448cbb202d389d3c775c, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=120.5 K 2024-12-08T05:53:08,330 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting ca568a06708b448cbb202d389d3c775c, keycount=70, bloomtype=ROW, size=79.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733637170051 2024-12-08T05:53:08,331 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting c1466d419b324aea8b9bef97e37a74d1, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733637186257 2024-12-08T05:53:08,331 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae8942df44ca412d944c3130728560d5, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733637186279 2024-12-08T05:53:08,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c6d54041dd1a4fa79677aa0dd813d257 is 1080, key is row0162/info:/1733637188306/Put/seqid=0 2024-12-08T05:53:08,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is 
added to blk_1073741865_1041 (size=19000) 2024-12-08T05:53:08,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741865_1041 (size=19000) 2024-12-08T05:53:08,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c6d54041dd1a4fa79677aa0dd813d257 2024-12-08T05:53:08,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c6d54041dd1a4fa79677aa0dd813d257 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257 2024-12-08T05:53:08,349 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#79 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:08,350 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/eb70b1149e6e4029ac5f8aa6704a7463 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:08,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257, entries=13, sequenceid=229, filesize=18.6 K 2024-12-08T05:53:08,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 73c59f49873ab33a15eaa1ae94fa7685 in 27ms, sequenceid=229, compaction requested=false 2024-12-08T05:53:08,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:08,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-08T05:53:08,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741866_1042 (size=113515) 2024-12-08T05:53:08,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741866_1042 (size=113515) 2024-12-08T05:53:08,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/44bfeeecdce94e4c92da7f80c9439c29 is 1080, key is row0175/info:/1733637188330/Put/seqid=0 2024-12-08T05:53:08,380 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/eb70b1149e6e4029ac5f8aa6704a7463 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/eb70b1149e6e4029ac5f8aa6704a7463 2024-12-08T05:53:08,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741867_1043 (size=19000) 2024-12-08T05:53:08,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741867_1043 (size=19000) 2024-12-08T05:53:08,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/44bfeeecdce94e4c92da7f80c9439c29 2024-12-08T05:53:08,386 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into eb70b1149e6e4029ac5f8aa6704a7463(size=110.9 K), total size for store is 129.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T05:53:08,386 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:08,386 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637188328; duration=0sec 2024-12-08T05:53:08,386 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:08,386 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/44bfeeecdce94e4c92da7f80c9439c29 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29 2024-12-08T05:53:08,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29, entries=13, sequenceid=245, filesize=18.6 K 2024-12-08T05:53:08,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=5.25 KB/5380 for 73c59f49873ab33a15eaa1ae94fa7685 in 36ms, sequenceid=245, compaction requested=true 2024-12-08T05:53:08,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:08,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:08,392 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:08,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:08,393 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 151515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:08,393 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:08,393 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 
2024-12-08T05:53:08,393 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/eb70b1149e6e4029ac5f8aa6704a7463, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=148.0 K 2024-12-08T05:53:08,394 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb70b1149e6e4029ac5f8aa6704a7463, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733637170051 2024-12-08T05:53:08,394 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6d54041dd1a4fa79677aa0dd813d257, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733637188306 2024-12-08T05:53:08,394 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 44bfeeecdce94e4c92da7f80c9439c29, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733637188330 2024-12-08T05:53:08,404 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#81 average throughput is 64.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:08,404 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c5f2ccfb78294d29bf22ae881d3631f0 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741868_1044 (size=141850) 2024-12-08T05:53:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741868_1044 (size=141850) 2024-12-08T05:53:08,413 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/c5f2ccfb78294d29bf22ae881d3631f0 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c5f2ccfb78294d29bf22ae881d3631f0 2024-12-08T05:53:08,419 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into c5f2ccfb78294d29bf22ae881d3631f0(size=138.5 K), total size for store is 138.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:08,419 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:08,419 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637188392; duration=0sec 2024-12-08T05:53:08,419 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:08,419 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:08,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:08,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:09,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:09,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:10,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:10,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:53:10,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/d0da09843535468ea9111a167a018c9d is 1080, key is row0188/info:/1733637188357/Put/seqid=0 2024-12-08T05:53:10,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741869_1045 (size=12517) 2024-12-08T05:53:10,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741869_1045 (size=12517) 2024-12-08T05:53:10,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/d0da09843535468ea9111a167a018c9d 2024-12-08T05:53:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/d0da09843535468ea9111a167a018c9d as 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d 2024-12-08T05:53:10,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d, entries=7, sequenceid=257, filesize=12.2 K 2024-12-08T05:53:10,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 73c59f49873ab33a15eaa1ae94fa7685 in 22ms, sequenceid=257, compaction requested=false 2024-12-08T05:53:10,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:10,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:10,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-08T05:53:10,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/0264c891fc4845828500a2459534fb84 is 1080, key is row0195/info:/1733637190385/Put/seqid=0 2024-12-08T05:53:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741870_1046 (size=22254) 2024-12-08T05:53:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741870_1046 (size=22254) 2024-12-08T05:53:10,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/0264c891fc4845828500a2459534fb84 2024-12-08T05:53:10,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/0264c891fc4845828500a2459534fb84 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84 2024-12-08T05:53:10,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84, entries=16, sequenceid=276, filesize=21.7 K 2024-12-08T05:53:10,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 73c59f49873ab33a15eaa1ae94fa7685 in 22ms, sequenceid=276, compaction requested=true 2024-12-08T05:53:10,429 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:10,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:10,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:10,429 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:10,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:10,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-08T05:53:10,430 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:10,430 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:10,430 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:10,430 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c5f2ccfb78294d29bf22ae881d3631f0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=172.5 K 2024-12-08T05:53:10,431 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5f2ccfb78294d29bf22ae881d3631f0, keycount=126, bloomtype=ROW, size=138.5 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733637170051 2024-12-08T05:53:10,431 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting d0da09843535468ea9111a167a018c9d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733637188357 2024-12-08T05:53:10,432 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0264c891fc4845828500a2459534fb84, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733637190385 2024-12-08T05:53:10,433 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/69b43247408942a4927ccd2a34d9d8e0 is 1080, key is row0211/info:/1733637190408/Put/seqid=0 2024-12-08T05:53:10,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741871_1047 (size=20092) 2024-12-08T05:53:10,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741871_1047 (size=20092) 2024-12-08T05:53:10,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/69b43247408942a4927ccd2a34d9d8e0 2024-12-08T05:53:10,444 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#85 average throughput is 50.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:10,444 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/b2d2293de3fd4964bcbd811c8386aa25 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/69b43247408942a4927ccd2a34d9d8e0 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0 2024-12-08T05:53:10,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741872_1048 (size=166767) 2024-12-08T05:53:10,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741872_1048 (size=166767) 2024-12-08T05:53:10,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0, entries=14, sequenceid=293, filesize=19.6 K 2024-12-08T05:53:10,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 73c59f49873ab33a15eaa1ae94fa7685 in 25ms, sequenceid=293, compaction requested=false 2024-12-08T05:53:10,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:10,456 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/b2d2293de3fd4964bcbd811c8386aa25 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/b2d2293de3fd4964bcbd811c8386aa25 2024-12-08T05:53:10,462 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into b2d2293de3fd4964bcbd811c8386aa25(size=162.9 K), total size for store is 182.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:10,462 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:10,462 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637190429; duration=0sec 2024-12-08T05:53:10,462 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:10,462 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:10,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:10,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:11,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:11,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:12,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:12,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-08T05:53:12,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/96a726a4c9e04df6b795c29761c57e2f is 1080, key is row0225/info:/1733637192431/Put/seqid=0 2024-12-08T05:53:12,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741873_1049 (size=12523) 2024-12-08T05:53:12,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741873_1049 (size=12523) 2024-12-08T05:53:12,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/96a726a4c9e04df6b795c29761c57e2f 2024-12-08T05:53:12,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/96a726a4c9e04df6b795c29761c57e2f as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f 2024-12-08T05:53:12,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f, entries=7, sequenceid=304, filesize=12.2 K 2024-12-08T05:53:12,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 73c59f49873ab33a15eaa1ae94fa7685 in 22ms, sequenceid=304, compaction requested=true 2024-12-08T05:53:12,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:12,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 73c59f49873ab33a15eaa1ae94fa7685:info, priority=-2147483648, current under compaction store size is 1 2024-12-08T05:53:12,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:12,463 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T05:53:12,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40801 {}] regionserver.HRegion(8855): Flush requested on 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:12,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-08T05:53:12,465 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 199382 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T05:53:12,465 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1541): 73c59f49873ab33a15eaa1ae94fa7685/info is initiating minor compaction (all files) 2024-12-08T05:53:12,465 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 73c59f49873ab33a15eaa1ae94fa7685/info in TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:12,465 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/b2d2293de3fd4964bcbd811c8386aa25, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f] into tmpdir=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp, totalSize=194.7 K 2024-12-08T05:53:12,465 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting b2d2293de3fd4964bcbd811c8386aa25, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733637170051 2024-12-08T05:53:12,466 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 69b43247408942a4927ccd2a34d9d8e0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733637190408 2024-12-08T05:53:12,466 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96a726a4c9e04df6b795c29761c57e2f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733637192431 2024-12-08T05:53:12,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/72e237964b3e4c008164987446bfb3a6 is 1080, key is row0232/info:/1733637192442/Put/seqid=0 2024-12-08T05:53:12,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to 
blk_1073741874_1050 (size=22254) 2024-12-08T05:53:12,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741874_1050 (size=22254) 2024-12-08T05:53:12,480 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 73c59f49873ab33a15eaa1ae94fa7685#info#compaction#88 average throughput is 58.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T05:53:12,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/72e237964b3e4c008164987446bfb3a6 2024-12-08T05:53:12,481 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/2ef3dd8dfca045ccad8087d858fe6277 is 1080, key is row0062/info:/1733637170051/Put/seqid=0 2024-12-08T05:53:12,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/72e237964b3e4c008164987446bfb3a6 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/72e237964b3e4c008164987446bfb3a6 2024-12-08T05:53:12,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741875_1051 (size=189536) 2024-12-08T05:53:12,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741875_1051 (size=189536) 2024-12-08T05:53:12,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/72e237964b3e4c008164987446bfb3a6, entries=16, sequenceid=323, filesize=21.7 K 2024-12-08T05:53:12,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 73c59f49873ab33a15eaa1ae94fa7685 in 28ms, sequenceid=323, compaction requested=false 2024-12-08T05:53:12,492 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/2ef3dd8dfca045ccad8087d858fe6277 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/2ef3dd8dfca045ccad8087d858fe6277 2024-12-08T05:53:12,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:12,497 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 3 (all) file(s) in 73c59f49873ab33a15eaa1ae94fa7685/info of 73c59f49873ab33a15eaa1ae94fa7685 into 2ef3dd8dfca045ccad8087d858fe6277(size=185.1 K), total size for store is 206.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T05:53:12,497 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:12,497 INFO [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., storeName=73c59f49873ab33a15eaa1ae94fa7685/info, priority=13, startTime=1733637192463; duration=0sec 2024-12-08T05:53:12,497 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T05:53:12,497 DEBUG [RS:0;0d942cb2025d:40801-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 73c59f49873ab33a15eaa1ae94fa7685:info 2024-12-08T05:53:12,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:12,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:13,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:13,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:14,480 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-08T05:53:14,481 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40801%2C1733637157037.1733637194481 2024-12-08T05:53:14,486 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,486 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,486 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,486 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,486 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,487 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637157414 with entries=313, filesize=308.60 KB; new WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637194481 2024-12-08T05:53:14,488 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39661:39661),(127.0.0.1/127.0.0.1:36391:36391)] 2024-12-08T05:53:14,488 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637157414 is not closed yet, will try archiving it next time 2024-12-08T05:53:14,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741833_1009 (size=316019) 2024-12-08T05:53:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741833_1009 (size=316019) 2024-12-08T05:53:14,492 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 73c59f49873ab33a15eaa1ae94fa7685 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-08T05:53:14,495 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/f5f525e5b9b14712a109f717dc1c19ed is 1080, key is row0248/info:/1733637192465/Put/seqid=0 2024-12-08T05:53:14,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741877_1053 (size=14681) 2024-12-08T05:53:14,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741877_1053 (size=14681) 2024-12-08T05:53:14,500 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/f5f525e5b9b14712a109f717dc1c19ed 2024-12-08T05:53:14,505 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/.tmp/info/f5f525e5b9b14712a109f717dc1c19ed as 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/f5f525e5b9b14712a109f717dc1c19ed 2024-12-08T05:53:14,509 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/f5f525e5b9b14712a109f717dc1c19ed, entries=9, sequenceid=336, filesize=14.3 K 2024-12-08T05:53:14,510 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 73c59f49873ab33a15eaa1ae94fa7685 in 18ms, sequenceid=336, compaction requested=true 2024-12-08T05:53:14,510 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 73c59f49873ab33a15eaa1ae94fa7685: 2024-12-08T05:53:14,510 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-08T05:53:14,514 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/2ef415ad2ab64ab6b45ac1f74b62838e is 193, key is TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685./info:regioninfo/1733637170790/Put/seqid=0 2024-12-08T05:53:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741878_1054 (size=6223) 2024-12-08T05:53:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741878_1054 (size=6223) 2024-12-08T05:53:14,519 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/2ef415ad2ab64ab6b45ac1f74b62838e 2024-12-08T05:53:14,524 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/.tmp/info/2ef415ad2ab64ab6b45ac1f74b62838e as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/info/2ef415ad2ab64ab6b45ac1f74b62838e 2024-12-08T05:53:14,528 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/info/2ef415ad2ab64ab6b45ac1f74b62838e, entries=5, sequenceid=21, filesize=6.1 K 2024-12-08T05:53:14,529 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-12-08T05:53:14,530 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-08T05:53:14,530 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c1e62187e6586f93a537f3e69fd81df4: 2024-12-08T05:53:14,530 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C40801%2C1733637157037.1733637194530 2024-12-08T05:53:14,535 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,535 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,535 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,535 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,535 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,535 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637194481 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637194530 2024-12-08T05:53:14,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741876_1052 (size=731) 2024-12-08T05:53:14,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39661:39661),(127.0.0.1/127.0.0.1:36391:36391)] 2024-12-08T05:53:14,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741876_1052 (size=731) 2024-12-08T05:53:14,537 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637194481 is not closed yet, will try archiving it next time 2024-12-08T05:53:14,537 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637157414 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs/0d942cb2025d%2C40801%2C1733637157037.1733637157414 2024-12-08T05:53:14,538 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-08T05:53:14,538 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:53:14,538 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:53:14,538 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:14,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/WALs/0d942cb2025d,40801,1733637157037/0d942cb2025d%2C40801%2C1733637157037.1733637194481 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs/0d942cb2025d%2C40801%2C1733637157037.1733637194481 2024-12-08T05:53:14,538 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:14,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:14,539 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:53:14,539 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:53:14,539 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1338999765, stopped=false 2024-12-08T05:53:14,539 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,44379,1733637156986 2024-12-08T05:53:14,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:14,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:14,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:14,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:14,540 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:53:14,541 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:53:14,541 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:14,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:14,541 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:14,541 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:14,542 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,40801,1733637157037' ***** 2024-12-08T05:53:14,542 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:53:14,542 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(3091): Received CLOSE for 73c59f49873ab33a15eaa1ae94fa7685 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(3091): Received CLOSE for c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,40801,1733637157037 2024-12-08T05:53:14,542 INFO [RS:0;0d942cb2025d:40801 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:40801. 
2024-12-08T05:53:14,543 DEBUG [RS:0;0d942cb2025d:40801 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 73c59f49873ab33a15eaa1ae94fa7685, disabling compactions & flushes 2024-12-08T05:53:14,543 DEBUG [RS:0;0d942cb2025d:40801 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:14,543 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. after waiting 0 ms 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 
2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:53:14,543 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-08T05:53:14,543 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1325): Online Regions={73c59f49873ab33a15eaa1ae94fa7685=TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685., 1588230740=hbase:meta,,1.1588230740, c1e62187e6586f93a537f3e69fd81df4=TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.} 2024-12-08T05:53:14,543 DEBUG [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 73c59f49873ab33a15eaa1ae94fa7685, c1e62187e6586f93a537f3e69fd81df4 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:53:14,543 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:53:14,543 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:53:14,543 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-top, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0f0ed446a47840619a15d2d9f922e9bc, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/22e0cc4a98334888ae8e8b6e482d8922, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08, 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/97391e3c66474b608c643b6eeadb8f69, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ca568a06708b448cbb202d389d3c775c, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/eb70b1149e6e4029ac5f8aa6704a7463, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c5f2ccfb78294d29bf22ae881d3631f0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/b2d2293de3fd4964bcbd811c8386aa25, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f] to archive 2024-12-08T05:53:14,544 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-12-08T05:53:14,546 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27 2024-12-08T05:53:14,547 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0f0ed446a47840619a15d2d9f922e9bc to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0f0ed446a47840619a15d2d9f922e9bc 2024-12-08T05:53:14,548 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/TestLogRolling-testLogRolling=438a8117f857d231782152090e27cd27-d0fc9c10e23640c89cc09f6d8cbf3851 2024-12-08T05:53:14,549 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/5882021201cc4ca0b4cbd23116c2a77f 2024-12-08T05:53:14,550 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/22e0cc4a98334888ae8e8b6e482d8922 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/22e0cc4a98334888ae8e8b6e482d8922 2024-12-08T05:53:14,551 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-08T05:53:14,551 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/45fb0bdb87ff4589bd88a22a84540a08 2024-12-08T05:53:14,552 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:53:14,552 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:53:14,552 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637194543Running coprocessor pre-close hooks at 1733637194543Disabling compacts and flushes for region at 1733637194543Disabling writes for close at 1733637194543Writing region close event to WAL at 1733637194548 (+5 ms)Running coprocessor post-close hooks at 1733637194552 (+4 ms)Closed at 1733637194552 2024-12-08T05:53:14,552 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:53:14,553 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/92a4fe5d2c264bfeadb61095dd6494c0 2024-12-08T05:53:14,554 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/97391e3c66474b608c643b6eeadb8f69 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/97391e3c66474b608c643b6eeadb8f69 2024-12-08T05:53:14,555 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/1e4d60424e994ad4a4d3320f1a9afa7e 2024-12-08T05:53:14,556 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/65b41467e6424ff4ab293e2390e7186d 2024-12-08T05:53:14,557 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ca568a06708b448cbb202d389d3c775c to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ca568a06708b448cbb202d389d3c775c 2024-12-08T05:53:14,558 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/6d0d345b27c0419191db359eceb6b8c4 2024-12-08T05:53:14,559 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c1466d419b324aea8b9bef97e37a74d1 2024-12-08T05:53:14,560 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/eb70b1149e6e4029ac5f8aa6704a7463 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/eb70b1149e6e4029ac5f8aa6704a7463 2024-12-08T05:53:14,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5 to 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/ae8942df44ca412d944c3130728560d5 2024-12-08T05:53:14,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c6d54041dd1a4fa79677aa0dd813d257 2024-12-08T05:53:14,563 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c5f2ccfb78294d29bf22ae881d3631f0 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/c5f2ccfb78294d29bf22ae881d3631f0 2024-12-08T05:53:14,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/44bfeeecdce94e4c92da7f80c9439c29 2024-12-08T05:53:14,565 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/d0da09843535468ea9111a167a018c9d 2024-12-08T05:53:14,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/b2d2293de3fd4964bcbd811c8386aa25 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/b2d2293de3fd4964bcbd811c8386aa25 2024-12-08T05:53:14,568 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/0264c891fc4845828500a2459534fb84 2024-12-08T05:53:14,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/69b43247408942a4927ccd2a34d9d8e0 2024-12-08T05:53:14,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/info/96a726a4c9e04df6b795c29761c57e2f 2024-12-08T05:53:14,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=0d942cb2025d:44379 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-08T05:53:14,571 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0f0ed446a47840619a15d2d9f922e9bc=8359, 5882021201cc4ca0b4cbd23116c2a77f=12509, 22e0cc4a98334888ae8e8b6e482d8922=31106, 45fb0bdb87ff4589bd88a22a84540a08=20064, 92a4fe5d2c264bfeadb61095dd6494c0=17896, 97391e3c66474b608c643b6eeadb8f69=59266, 1e4d60424e994ad4a4d3320f1a9afa7e=20078, 65b41467e6424ff4ab293e2390e7186d=19000, ca568a06708b448cbb202d389d3c775c=81065, 6d0d345b27c0419191db359eceb6b8c4=12516, c1466d419b324aea8b9bef97e37a74d1=21156, eb70b1149e6e4029ac5f8aa6704a7463=113515, ae8942df44ca412d944c3130728560d5=21156, c6d54041dd1a4fa79677aa0dd813d257=19000, c5f2ccfb78294d29bf22ae881d3631f0=141850, 44bfeeecdce94e4c92da7f80c9439c29=19000, d0da09843535468ea9111a167a018c9d=12517, b2d2293de3fd4964bcbd811c8386aa25=166767, 0264c891fc4845828500a2459534fb84=22254, 69b43247408942a4927ccd2a34d9d8e0=20092, 96a726a4c9e04df6b795c29761c57e2f=12523] 2024-12-08T05:53:14,574 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/73c59f49873ab33a15eaa1ae94fa7685/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=88 2024-12-08T05:53:14,575 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 73c59f49873ab33a15eaa1ae94fa7685: Waiting for close lock at 1733637194542Running coprocessor pre-close hooks at 1733637194543 (+1 ms)Disabling compacts and flushes for region at 1733637194543Disabling writes for close at 1733637194543Writing region close event to WAL at 1733637194571 (+28 ms)Running coprocessor post-close hooks at 1733637194575 (+4 ms)Closed at 1733637194575 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733637170115.73c59f49873ab33a15eaa1ae94fa7685. 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c1e62187e6586f93a537f3e69fd81df4, disabling compactions & flushes 2024-12-08T05:53:14,575 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 
after waiting 0 ms 2024-12-08T05:53:14,575 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:53:14,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27->hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/438a8117f857d231782152090e27cd27/info/fe9a01eb86a94ef08c3c3bb52db23e04-bottom] to archive 2024-12-08T05:53:14,576 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T05:53:14,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27 to hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/archive/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/info/fe9a01eb86a94ef08c3c3bb52db23e04.438a8117f857d231782152090e27cd27 2024-12-08T05:53:14,577 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-08T05:53:14,581 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/data/default/TestLogRolling-testLogRolling/c1e62187e6586f93a537f3e69fd81df4/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-12-08T05:53:14,581 INFO [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:53:14,581 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c1e62187e6586f93a537f3e69fd81df4: Waiting for close lock at 1733637194575Running coprocessor pre-close hooks at 1733637194575Disabling compacts and flushes for region at 1733637194575Disabling writes for close at 1733637194575Writing region close event to WAL at 1733637194578 (+3 ms)Running coprocessor post-close hooks at 1733637194581 (+3 ms)Closed at 1733637194581 2024-12-08T05:53:14,581 DEBUG [RS_CLOSE_REGION-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733637170115.c1e62187e6586f93a537f3e69fd81df4. 2024-12-08T05:53:14,743 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,40801,1733637157037; all regions closed. 
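The StoreCloser/HFileArchiver lines above show compacted store files being relocated rather than deleted: each file under data/default/<table>/<region>/info/ is moved to the mirrored path under archive/data/..., preserving the relative layout. A minimal sketch of that move using the Hadoop FileSystem API (illustrative helper with a hypothetical name; the real HFileArchiver also handles collisions and retries):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      // Moves a store file under <root>/data/... to the mirrored path under <root>/archive/data/...
      static boolean archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String dataPrefix = new Path(rootDir, "data").toUri().getPath();
        String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
        Path target = new Path(new Path(rootDir, "archive/data"), relative);
        fs.mkdirs(target.getParent());       // ensure archive/.../info exists
        return fs.rename(storeFile, target); // keep the file, just take it out of the live store
      }
    }

Keeping the relative path identical under archive/ is what makes the "Archived from ... to ..." pairs above line up file for file.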
2024-12-08T05:53:14,744 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,744 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,744 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,744 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,744 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741834_1010 (size=8107) 2024-12-08T05:53:14,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741834_1010 (size=8107) 2024-12-08T05:53:14,748 DEBUG [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs 2024-12-08T05:53:14,748 INFO [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C40801%2C1733637157037.meta:.meta(num 1733637157782) 2024-12-08T05:53:14,749 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,749 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,749 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,749 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,749 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741879_1055 (size=778) 2024-12-08T05:53:14,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741879_1055 (size=778) 2024-12-08T05:53:14,753 DEBUG [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/oldWALs 2024-12-08T05:53:14,753 INFO [RS:0;0d942cb2025d:40801 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C40801%2C1733637157037:(num 1733637194530) 2024-12-08T05:53:14,753 DEBUG [RS:0;0d942cb2025d:40801 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:14,753 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:53:14,753 INFO [RS:0;0d942cb2025d:40801 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:53:14,754 INFO [RS:0;0d942cb2025d:40801 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:53:14,754 INFO [RS:0;0d942cb2025d:40801 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:53:14,754 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
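The "Chore service for: regionserver/... had [ScheduledChore name=CompactionThroughputTuner, period=60000, ...] on shutdown" line above records periodic background tasks being cancelled as the server stops. A generic Java analogue of that pattern with ScheduledExecutorService (this is not HBase's ChoreService class, just the same schedule-then-shut-down shape):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreServiceSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

        // Periodic task comparable to a ScheduledChore with period=60000 ms.
        chores.scheduleAtFixedRate(
            () -> System.out.println("tuning compaction throughput..."),
            0, 60_000, TimeUnit.MILLISECONDS);

        // On shutdown the pool stops accepting work and interrupts running tasks,
        // which is when the "... had [...] on shutdown" style messages get logged.
        chores.shutdownNow();
        chores.awaitTermination(5, TimeUnit.SECONDS);
      }
    }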
2024-12-08T05:53:14,754 INFO [RS:0;0d942cb2025d:40801 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40801 2024-12-08T05:53:14,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,40801,1733637157037 2024-12-08T05:53:14,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:53:14,756 INFO [RS:0;0d942cb2025d:40801 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:53:14,757 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,40801,1733637157037] 2024-12-08T05:53:14,759 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,40801,1733637157037 already deleted, retry=false 2024-12-08T05:53:14,759 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,40801,1733637157037 expired; onlineServers=0 2024-12-08T05:53:14,759 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,44379,1733637156986' ***** 2024-12-08T05:53:14,759 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:53:14,759 INFO [M:0;0d942cb2025d:44379 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:53:14,759 INFO [M:0;0d942cb2025d:44379 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:53:14,759 DEBUG [M:0;0d942cb2025d:44379 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:53:14,759 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
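The NodeDeleted event on /hbase/rs/0d942cb2025d,40801,... followed by "RegionServer ephemeral node deleted, processing expiration" above illustrates the ephemeral-znode liveness pattern: a region server advertises itself with an ephemeral node, and when its ZooKeeper session ends the node disappears and watchers are notified. A minimal sketch with the plain ZooKeeper API (hypothetical server name and connection string; assumes the /hbase/rs parent already exists):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRsNodeSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        // Ephemeral node tied to this session; a tracker watching /hbase/rs sees
        // NodeChildrenChanged / NodeDeleted when the session goes away.
        zk.create("/hbase/rs/example-server,40801,1733637157037",
            new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        zk.close();   // closing the session removes the ephemeral node
      }
    }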
2024-12-08T05:53:14,759 DEBUG [M:0;0d942cb2025d:44379 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:53:14,759 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637157177 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637157177,5,FailOnTimeoutGroup] 2024-12-08T05:53:14,759 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637157177 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637157177,5,FailOnTimeoutGroup] 2024-12-08T05:53:14,760 INFO [M:0;0d942cb2025d:44379 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:53:14,760 INFO [M:0;0d942cb2025d:44379 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:53:14,760 DEBUG [M:0;0d942cb2025d:44379 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:53:14,760 INFO [M:0;0d942cb2025d:44379 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:53:14,760 INFO [M:0;0d942cb2025d:44379 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:53:14,760 INFO [M:0;0d942cb2025d:44379 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:53:14,760 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:53:14,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:53:14,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:14,761 DEBUG [M:0;0d942cb2025d:44379 {}] zookeeper.ZKUtil(347): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:53:14,761 WARN [M:0;0d942cb2025d:44379 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:53:14,761 INFO [M:0;0d942cb2025d:44379 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/.lastflushedseqids 2024-12-08T05:53:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741880_1056 (size=228) 2024-12-08T05:53:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741880_1056 (size=228) 2024-12-08T05:53:14,767 INFO [M:0;0d942cb2025d:44379 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:53:14,767 INFO [M:0;0d942cb2025d:44379 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:53:14,767 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:53:14,767 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:14,767 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:14,767 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:53:14,767 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:14,767 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-12-08T05:53:14,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:14,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:14,783 DEBUG [M:0;0d942cb2025d:44379 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2e7f98b8ff6b48bc8f22f08196721cdd is 82, key is hbase:meta,,1/info:regioninfo/1733637157804/Put/seqid=0 2024-12-08T05:53:14,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741881_1057 (size=5672) 2024-12-08T05:53:14,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741881_1057 (size=5672) 2024-12-08T05:53:14,787 INFO [M:0;0d942cb2025d:44379 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2e7f98b8ff6b48bc8f22f08196721cdd 2024-12-08T05:53:14,804 DEBUG [M:0;0d942cb2025d:44379 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd6860ea53264e848aff28d0309c759e is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733637158629/Put/seqid=0 2024-12-08T05:53:14,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741882_1058 (size=7089) 2024-12-08T05:53:14,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741882_1058 (size=7089) 2024-12-08T05:53:14,812 INFO [M:0;0d942cb2025d:44379 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd6860ea53264e848aff28d0309c759e 2024-12-08T05:53:14,816 INFO [M:0;0d942cb2025d:44379 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cd6860ea53264e848aff28d0309c759e 2024-12-08T05:53:14,830 DEBUG [M:0;0d942cb2025d:44379 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f34411646e78482aabfd671057bce998 is 69, key is 0d942cb2025d,40801,1733637157037/rs:state/1733637157270/Put/seqid=0 2024-12-08T05:53:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741883_1059 (size=5156) 2024-12-08T05:53:14,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741883_1059 (size=5156) 2024-12-08T05:53:14,835 INFO [M:0;0d942cb2025d:44379 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f34411646e78482aabfd671057bce998 2024-12-08T05:53:14,852 DEBUG [M:0;0d942cb2025d:44379 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eba7901485e041a2a478297dc0da5e7c is 52, key is load_balancer_on/state:d/1733637157861/Put/seqid=0 2024-12-08T05:53:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741884_1060 (size=5056) 2024-12-08T05:53:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741884_1060 (size=5056) 2024-12-08T05:53:14,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:14,857 INFO [RS:0;0d942cb2025d:40801 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:53:14,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40801-0x10190a19af90001, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:14,857 INFO [RS:0;0d942cb2025d:40801 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,40801,1733637157037; zookeeper connection closed. 2024-12-08T05:53:14,857 INFO [M:0;0d942cb2025d:44379 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eba7901485e041a2a478297dc0da5e7c 2024-12-08T05:53:14,857 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e4ec685 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e4ec685 2024-12-08T05:53:14,858 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:53:14,861 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2e7f98b8ff6b48bc8f22f08196721cdd as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2e7f98b8ff6b48bc8f22f08196721cdd 2024-12-08T05:53:14,865 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2e7f98b8ff6b48bc8f22f08196721cdd, entries=8, sequenceid=125, filesize=5.5 K 2024-12-08T05:53:14,866 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd6860ea53264e848aff28d0309c759e as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd6860ea53264e848aff28d0309c759e 2024-12-08T05:53:14,869 INFO [M:0;0d942cb2025d:44379 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 
cd6860ea53264e848aff28d0309c759e 2024-12-08T05:53:14,870 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd6860ea53264e848aff28d0309c759e, entries=13, sequenceid=125, filesize=6.9 K 2024-12-08T05:53:14,870 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f34411646e78482aabfd671057bce998 as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f34411646e78482aabfd671057bce998 2024-12-08T05:53:14,874 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f34411646e78482aabfd671057bce998, entries=1, sequenceid=125, filesize=5.0 K 2024-12-08T05:53:14,875 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/eba7901485e041a2a478297dc0da5e7c as hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eba7901485e041a2a478297dc0da5e7c 2024-12-08T05:53:14,879 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45689/user/jenkins/test-data/5785c3a4-658d-03ab-0c59-b7e1210595b0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/eba7901485e041a2a478297dc0da5e7c, entries=1, sequenceid=125, filesize=4.9 K 2024-12-08T05:53:14,880 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false 2024-12-08T05:53:14,881 INFO [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:14,881 DEBUG [M:0;0d942cb2025d:44379 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637194767Disabling compacts and flushes for region at 1733637194767Disabling writes for close at 1733637194767Obtaining lock to block concurrent updates at 1733637194767Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637194767Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1733637194768 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733637194768Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637194768Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637194782 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637194783 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637194791 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637194804 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637194804Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637194816 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637194829 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637194829Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637194839 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637194852 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637194852Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@443f34c8: reopening flushed file at 1733637194861 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f819344: reopening flushed file at 1733637194865 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d2aff49: reopening flushed file at 1733637194870 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@118e290f: reopening flushed file at 1733637194875 (+5 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=125, compaction requested=false at 1733637194880 (+5 ms)Writing region close event to WAL at 1733637194881 (+1 ms)Closed at 1733637194881 2024-12-08T05:53:14,881 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,881 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:14,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39251 is added to blk_1073741830_1006 (size=61308) 2024-12-08T05:53:14,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41373 is added to blk_1073741830_1006 (size=61308) 2024-12-08T05:53:14,884 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:53:14,884 INFO [M:0;0d942cb2025d:44379 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
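
The "Region close journal" entry above is one concatenated string of steps, each ending in "at <epoch millis>" and, where time elapsed, "(+N ms)". Below is a minimal, hypothetical Java helper for pulling the per-step timings out of such a string; the journal format is plain log output rather than a stable API, so the regex is an assumption based on the text above.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Hypothetical helper: splits an HBase region close journal string into steps
    // and prints each step's offset from the first timestamp plus any "(+N ms)" delta.
    public class CloseJournalTimings {
        // Assumed step shape: "<description> at <13-digit epoch millis>" optionally followed by " (+N ms)"
        private static final Pattern STEP =
            Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

        public static void main(String[] args) {
            String journal = "Waiting for close lock at 1733637194767"
                + "Disabling compacts and flushes for region at 1733637194767"
                + "Finished memstore snapshotting at 1733637194768 (+1 ms)"
                + "Closed at 1733637194881 (+113 ms)";   // shortened sample with the same shape as the log
            Matcher m = STEP.matcher(journal);
            long first = -1;
            while (m.find()) {
                long ts = Long.parseLong(m.group(2));
                if (first < 0) {
                    first = ts;
                }
                String delta = m.group(3) == null ? "" : " (+" + m.group(3) + " ms)";
                System.out.printf("%6d ms  %s%s%n", ts - first, m.group(1).trim(), delta);
            }
        }
    }
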
2024-12-08T05:53:14,884 INFO [M:0;0d942cb2025d:44379 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44379 2024-12-08T05:53:14,884 INFO [M:0;0d942cb2025d:44379 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:53:14,989 INFO [M:0;0d942cb2025d:44379 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:53:14,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:14,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44379-0x10190a19af90000, quorum=127.0.0.1:58794, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:14,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ca112a4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:14,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cb0a7d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:14,992 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:14,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@325d40e6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:14,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ee1549{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:14,995 WARN [BP-178084469-172.17.0.2-1733637156251 heartbeating to localhost/127.0.0.1:45689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:53:14,995 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
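
The stops above, the master's NettyRpcServer, its ZooKeeper connection, and the Jetty handlers of the embedded HDFS DataNode, are the tail end of the test's minicluster teardown. A minimal sketch of the lifecycle call that drives this cascade, assuming the HBaseTestingUtil API named throughout this log (method names follow current HBase test utilities and may differ in other versions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Sketch only: the start/stop pairing whose shutdown half produces the cascade seen in this log.
    public class MiniClusterLifecycleSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            util.startMiniCluster();          // ZK quorum, MiniDFS and the HBase master/regionserver
            try {
                // ... test body, e.g. the log rolling assertions ...
            } finally {
                util.shutdownMiniCluster();   // stops HBase, then the DataNodes/NameNode, then MiniZK
            }
        }
    }
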
2024-12-08T05:53:14,995 WARN [BP-178084469-172.17.0.2-1733637156251 heartbeating to localhost/127.0.0.1:45689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-178084469-172.17.0.2-1733637156251 (Datanode Uuid 670e7c3a-b28b-42f1-b4f1-a1816b844a65) service to localhost/127.0.0.1:45689 2024-12-08T05:53:14,995 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:53:14,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data3/current/BP-178084469-172.17.0.2-1733637156251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:14,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data4/current/BP-178084469-172.17.0.2-1733637156251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:14,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:53:14,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59287e93{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:14,998 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36e18827{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:14,998 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:14,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53db1080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:14,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14304e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:14,999 WARN [BP-178084469-172.17.0.2-1733637156251 heartbeating to localhost/127.0.0.1:45689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:53:14,999 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
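
The "Potentially hanging thread" dump that follows is the ResourceChecker comparing the threads and file descriptors recorded before the test with those still alive after it (hence "Thread=232 (was 207)" further down). A simplified, self-contained illustration of that kind of before/after thread comparison; this shows only the underlying idea, not HBase's ResourceChecker implementation:

    import java.util.HashSet;
    import java.util.Set;

    // Simplified before/after thread-leak check, in the spirit of the ResourceChecker output below.
    public class ThreadLeakCheckSketch {
        private static Set<String> liveThreadNames() {
            Set<String> names = new HashSet<>();
            for (Thread t : Thread.getAllStackTraces().keySet()) {
                names.add(t.getName());
            }
            return names;
        }

        public static void main(String[] args) {
            Set<String> before = liveThreadNames();

            // ... run the test here ...

            Set<String> leaked = liveThreadNames();
            leaked.removeAll(before);            // threads that appeared during the test and are still alive
            for (String name : leaked) {
                System.out.println("Potentially hanging thread: " + name);
            }
            System.out.printf("Thread=%d (was %d)%n", Thread.getAllStackTraces().size(), before.size());
        }
    }
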
2024-12-08T05:53:14,999 WARN [BP-178084469-172.17.0.2-1733637156251 heartbeating to localhost/127.0.0.1:45689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-178084469-172.17.0.2-1733637156251 (Datanode Uuid fcc70864-cafb-49da-b8c0-681325cbab83) service to localhost/127.0.0.1:45689 2024-12-08T05:53:14,999 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:53:15,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data1/current/BP-178084469-172.17.0.2-1733637156251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:15,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/cluster_54a16acd-361e-5624-5a58-650eccf65b79/data/data2/current/BP-178084469-172.17.0.2-1733637156251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:15,000 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:53:15,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78d0933c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:53:15,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32a8e311{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:15,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:15,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@462b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:15,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@751d1cbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:15,014 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:53:15,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:53:15,057 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 207) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45689 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:45689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=45 (was 88), ProcessCount=11 (was 11), AvailableMemoryMB=7509 (was 7663) 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=45, ProcessCount=11, AvailableMemoryMB=7509 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.log.dir so I do NOT create it in target/test-data/56e147b6-47fb-9016-83f7-441745635da6 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/92aebf17-5bac-fad6-897b-ac32a37f8e80/hadoop.tmp.dir so I do NOT create it in target/test-data/56e147b6-47fb-9016-83f7-441745635da6 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576, deleteOnExit=true 2024-12-08T05:53:15,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/test.cache.data in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T05:53:15,066 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:53:15,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/nfs.dump.dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/java.io.tmpdir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T05:53:15,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T05:53:15,079 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:53:15,143 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:53:15,146 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:53:15,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:53:15,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:53:15,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:53:15,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:53:15,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f551216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:53:15,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3670d9ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:53:15,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3665bd5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/java.io.tmpdir/jetty-localhost-41141-hadoop-hdfs-3_4_1-tests_jar-_-any-646284816739034133/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:53:15,265 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43ab2d8e{HTTP/1.1, (http/1.1)}{localhost:41141} 2024-12-08T05:53:15,265 INFO [Time-limited test {}] server.Server(415): Started @275449ms 2024-12-08T05:53:15,277 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-08T05:53:15,284 INFO [regionserver/0d942cb2025d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:53:15,326 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:53:15,329 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:53:15,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:53:15,329 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:53:15,329 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:53:15,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fd2bdde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:53:15,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2628bbf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:53:15,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ca0473d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/java.io.tmpdir/jetty-localhost-39267-hadoop-hdfs-3_4_1-tests_jar-_-any-17657170193671978218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:15,445 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63f95535{HTTP/1.1, (http/1.1)}{localhost:39267} 2024-12-08T05:53:15,445 INFO [Time-limited test {}] server.Server(415): Started @275629ms 2024-12-08T05:53:15,446 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T05:53:15,474 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T05:53:15,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T05:53:15,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T05:53:15,477 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T05:53:15,477 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T05:53:15,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d5d1a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,AVAILABLE} 2024-12-08T05:53:15,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4cf04c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T05:53:15,550 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data2/current/BP-1214476892-172.17.0.2-1733637195085/current, will proceed with Du for space computation calculation, 2024-12-08T05:53:15,550 WARN [Thread-2467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data1/current/BP-1214476892-172.17.0.2-1733637195085/current, will proceed with Du for space computation calculation, 2024-12-08T05:53:15,566 WARN [Thread-2446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:53:15,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1da31590cdcff304 with lease ID 0xbebfc7d8dd2c940f: Processing first storage report for DS-a6359fb7-3c8b-4b7d-8981-7740c2e84f08 from datanode DatanodeRegistration(127.0.0.1:42463, datanodeUuid=39915b98-90ef-467f-a124-1421cf77403e, infoPort=45081, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085) 2024-12-08T05:53:15,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1da31590cdcff304 with lease ID 0xbebfc7d8dd2c940f: from storage DS-a6359fb7-3c8b-4b7d-8981-7740c2e84f08 node DatanodeRegistration(127.0.0.1:42463, datanodeUuid=39915b98-90ef-467f-a124-1421cf77403e, infoPort=45081, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:53:15,569 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1da31590cdcff304 with lease ID 0xbebfc7d8dd2c940f: Processing first storage report for DS-025083c6-c643-4abd-a3dc-e958642fa4ec from datanode DatanodeRegistration(127.0.0.1:42463, datanodeUuid=39915b98-90ef-467f-a124-1421cf77403e, infoPort=45081, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085) 2024-12-08T05:53:15,569 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1da31590cdcff304 with lease ID 0xbebfc7d8dd2c940f: from storage DS-025083c6-c643-4abd-a3dc-e958642fa4ec node DatanodeRegistration(127.0.0.1:42463, datanodeUuid=39915b98-90ef-467f-a124-1421cf77403e, infoPort=45081, infoSecurePort=0, ipcPort=40985, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:53:15,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34ce23e2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/java.io.tmpdir/jetty-localhost-45537-hadoop-hdfs-3_4_1-tests_jar-_-any-2273064726976886016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:15,610 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a232eac{HTTP/1.1, (http/1.1)}{localhost:45537} 2024-12-08T05:53:15,610 INFO [Time-limited test {}] server.Server(415): Started @275794ms 2024-12-08T05:53:15,611 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
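
The DFS and Jetty startup above belongs to the second minicluster, brought up for testLogRollOnNothingWritten with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A minimal sketch of how a test requests that topology; the builder method names mirror the option fields printed in the log and are assumed to match the HBaseTestingUtil API in this source tree:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch: start a minicluster with the topology shown in the log
    // (1 master, 1 regionserver, 2 datanodes, 1 ZK server), then shut it down.
    public class StartOptionSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);   // emits "Starting up minicluster with option: ..."
            util.shutdownMiniCluster();
        }
    }
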
2024-12-08T05:53:15,648 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:53:15,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T05:53:15,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T05:53:15,649 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-08T05:53:15,710 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data3/current/BP-1214476892-172.17.0.2-1733637195085/current, will proceed with Du for space computation calculation, 2024-12-08T05:53:15,710 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data4/current/BP-1214476892-172.17.0.2-1733637195085/current, will proceed with Du for space computation calculation, 2024-12-08T05:53:15,726 WARN [Thread-2482 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-08T05:53:15,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb80141429609a3c4 with lease ID 0xbebfc7d8dd2c9410: Processing first storage report for DS-2cfbe40d-7a59-459b-b28b-d8aff20d4a0d from datanode DatanodeRegistration(127.0.0.1:46063, datanodeUuid=8cdc78c3-7208-460d-b26f-82abaf6194f8, infoPort=38881, infoSecurePort=0, ipcPort=38129, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085) 2024-12-08T05:53:15,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb80141429609a3c4 with lease ID 0xbebfc7d8dd2c9410: from storage DS-2cfbe40d-7a59-459b-b28b-d8aff20d4a0d node DatanodeRegistration(127.0.0.1:46063, datanodeUuid=8cdc78c3-7208-460d-b26f-82abaf6194f8, infoPort=38881, infoSecurePort=0, ipcPort=38129, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:53:15,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb80141429609a3c4 with lease ID 0xbebfc7d8dd2c9410: Processing first storage report for DS-3c8fd5a2-789e-46ac-ab10-892b0b823e3e from datanode DatanodeRegistration(127.0.0.1:46063, datanodeUuid=8cdc78c3-7208-460d-b26f-82abaf6194f8, infoPort=38881, infoSecurePort=0, ipcPort=38129, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085) 2024-12-08T05:53:15,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb80141429609a3c4 with lease ID 0xbebfc7d8dd2c9410: from storage DS-3c8fd5a2-789e-46ac-ab10-892b0b823e3e node DatanodeRegistration(127.0.0.1:46063, datanodeUuid=8cdc78c3-7208-460d-b26f-82abaf6194f8, infoPort=38881, infoSecurePort=0, ipcPort=38129, storageInfo=lv=-57;cid=testClusterID;nsid=1886055556;c=1733637195085), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T05:53:15,733 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6 2024-12-08T05:53:15,736 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/zookeeper_0, clientPort=54003, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T05:53:15,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54003 2024-12-08T05:53:15,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,739 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:53:15,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741825_1001 (size=7) 2024-12-08T05:53:15,751 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b with version=8 2024-12-08T05:53:15,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45577/user/jenkins/test-data/bf8a5545-5794-c2d4-3044-ce65ff76b976/hbase-staging 2024-12-08T05:53:15,753 INFO [Time-limited test {}] client.ConnectionUtils(128): master/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:53:15,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,753 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:53:15,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:53:15,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T05:53:15,754 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:53:15,755 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39335 2024-12-08T05:53:15,756 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39335 connecting to ZooKeeper ensemble=127.0.0.1:54003 2024-12-08T05:53:15,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393350x0, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:53:15,764 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39335-0x10190a232660000 connected 2024-12-08T05:53:15,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta 
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-08T05:53:15,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-08T05:53:15,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:15,785 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b, hbase.cluster.distributed=false 2024-12-08T05:53:15,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:53:15,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-08T05:53:15,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39335 2024-12-08T05:53:15,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39335 2024-12-08T05:53:15,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-08T05:53:15,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39335 2024-12-08T05:53:15,806 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/0d942cb2025d:0 server-side Connection retries=45 2024-12-08T05:53:15,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T05:53:15,807 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45399 2024-12-08T05:53:15,808 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45399 connecting to ZooKeeper ensemble=127.0.0.1:54003 2024-12-08T05:53:15,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453990x0, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T05:53:15,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:453990x0, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:15,816 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45399-0x10190a232660001 connected 2024-12-08T05:53:15,816 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T05:53:15,818 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T05:53:15,818 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T05:53:15,819 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T05:53:15,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45399 2024-12-08T05:53:15,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45399 2024-12-08T05:53:15,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45399 2024-12-08T05:53:15,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45399 2024-12-08T05:53:15,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45399 2024-12-08T05:53:15,832 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;0d942cb2025d:39335 2024-12-08T05:53:15,832 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:53:15,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:53:15,834 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T05:53:15,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,836 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T05:53:15,836 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/0d942cb2025d,39335,1733637195753 from backup master directory 2024-12-08T05:53:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:53:15,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T05:53:15,839 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T05:53:15,839 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,842 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/hbase.id] with ID: 6073ac3b-bacc-42b1-b91a-63ddda62cc98 2024-12-08T05:53:15,842 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/.tmp/hbase.id 2024-12-08T05:53:15,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:53:15,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741826_1002 (size=42) 2024-12-08T05:53:15,849 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/.tmp/hbase.id]:[hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/hbase.id] 2024-12-08T05:53:15,861 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:15,861 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T05:53:15,862 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-08T05:53:15,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:53:15,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741827_1003 (size=196) 2024-12-08T05:53:15,874 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T05:53:15,875 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T05:53:15,876 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:53:15,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:53:15,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741828_1004 (size=1189) 2024-12-08T05:53:15,882 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store 2024-12-08T05:53:15,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:53:15,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741829_1005 (size=34) 2024-12-08T05:53:15,888 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:53:15,888 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:53:15,889 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:15,889 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:15,889 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:53:15,889 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:15,889 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T05:53:15,889 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637195888Disabling compacts and flushes for region at 1733637195888Disabling writes for close at 1733637195889 (+1 ms)Writing region close event to WAL at 1733637195889Closed at 1733637195889 2024-12-08T05:53:15,889 WARN [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/.initializing 2024-12-08T05:53:15,889 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/WALs/0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,891 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C39335%2C1733637195753, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/WALs/0d942cb2025d,39335,1733637195753, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/oldWALs, maxLogs=10 2024-12-08T05:53:15,892 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C39335%2C1733637195753.1733637195891 2024-12-08T05:53:15,901 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/WALs/0d942cb2025d,39335,1733637195753/0d942cb2025d%2C39335%2C1733637195753.1733637195891 2024-12-08T05:53:15,904 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45081:45081),(127.0.0.1/127.0.0.1:38881:38881)] 2024-12-08T05:53:15,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:53:15,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:53:15,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,907 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T05:53:15,910 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:15,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T05:53:15,911 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:53:15,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T05:53:15,913 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:53:15,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T05:53:15,915 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T05:53:15,915 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,916 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,916 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,917 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,917 DEBUG [master/0d942cb2025d:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,918 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T05:53:15,918 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T05:53:15,920 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:53:15,921 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708171, jitterRate=-0.09951508045196533}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T05:53:15,921 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733637195907Initializing all the Stores at 1733637195908 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637195908Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637195908Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637195908Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637195908Cleaning up temporary data from old regions at 1733637195917 (+9 ms)Region opened successfully at 1733637195921 (+4 ms) 2024-12-08T05:53:15,921 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T05:53:15,924 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1708f810, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:53:15,925 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-08T05:53:15,925 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T05:53:15,925 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T05:53:15,925 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T05:53:15,925 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T05:53:15,926 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-08T05:53:15,926 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T05:53:15,928 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T05:53:15,928 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T05:53:15,929 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T05:53:15,930 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T05:53:15,930 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T05:53:15,931 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T05:53:15,931 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T05:53:15,932 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T05:53:15,935 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T05:53:15,935 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T05:53:15,936 DEBUG 
[master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T05:53:15,938 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T05:53:15,939 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T05:53:15,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:15,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:15,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,941 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=0d942cb2025d,39335,1733637195753, sessionid=0x10190a232660000, setting cluster-up flag (Was=false) 2024-12-08T05:53:15,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,950 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T05:53:15,951 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:15,959 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T05:53:15,960 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=0d942cb2025d,39335,1733637195753 2024-12-08T05:53:15,961 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T05:53:15,963 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T05:53:15,963 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T05:53:15,963 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T05:53:15,964 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 0d942cb2025d,39335,1733637195753 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/0d942cb2025d:0, corePoolSize=5, maxPoolSize=5 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/0d942cb2025d:0, corePoolSize=10, maxPoolSize=10 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:53:15,965 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/0d942cb2025d:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T05:53:15,967 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:53:15,967 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733637225967 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T05:53:15,967 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T05:53:15,968 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-08T05:53:15,968 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T05:53:15,968 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T05:53:15,968 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,968 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T05:53:15,968 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T05:53:15,972 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T05:53:15,972 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T05:53:15,973 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637195972,5,FailOnTimeoutGroup] 2024-12-08T05:53:15,973 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637195973,5,FailOnTimeoutGroup] 2024-12-08T05:53:15,973 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:15,973 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T05:53:15,973 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:15,973 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:15,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:53:15,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741831_1007 (size=1321) 2024-12-08T05:53:15,982 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T05:53:15,982 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b 2024-12-08T05:53:15,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:53:15,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741832_1008 (size=32) 2024-12-08T05:53:15,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:53:15,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:53:15,994 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:53:15,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:15,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:53:15,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:53:15,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:15,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:53:15,998 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:53:15,998 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:15,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:15,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:53:15,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:53:15,999 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:16,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:16,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:53:16,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740 2024-12-08T05:53:16,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740 2024-12-08T05:53:16,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:53:16,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:53:16,002 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:53:16,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:53:16,005 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T05:53:16,005 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875074, jitterRate=0.11271470785140991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:53:16,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733637195992Initializing all the Stores at 1733637195993 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637195993Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637195993Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637195993Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637195993Cleaning up temporary data from old regions at 1733637196002 (+9 ms)Region opened successfully at 1733637196006 (+4 ms) 2024-12-08T05:53:16,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:53:16,006 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:53:16,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:53:16,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:53:16,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:53:16,007 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:53:16,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637196006Disabling compacts and flushes for region at 
1733637196006Disabling writes for close at 1733637196006Writing region close event to WAL at 1733637196007 (+1 ms)Closed at 1733637196007 2024-12-08T05:53:16,008 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:53:16,008 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T05:53:16,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T05:53:16,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:53:16,010 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T05:53:16,022 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(746): ClusterId : 6073ac3b-bacc-42b1-b91a-63ddda62cc98 2024-12-08T05:53:16,022 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T05:53:16,024 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T05:53:16,024 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T05:53:16,025 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T05:53:16,026 DEBUG [RS:0;0d942cb2025d:45399 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@561ece0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=0d942cb2025d/172.17.0.2:0 2024-12-08T05:53:16,037 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;0d942cb2025d:45399 2024-12-08T05:53:16,037 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T05:53:16,037 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T05:53:16,037 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(832): About to register with Master. 
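The CompactionConfiguration entries above report the stock exploring-compaction defaults (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, major period 604800000 ms with 0.5 jitter). As a hedged illustration, values like these typically map onto the standard hbase.hstore.compaction.* and hbase.hregion.majorcompaction settings; the sketch below sets them programmatically and is not taken from this test's own configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        // Start from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Values below mirror the defaults reported in the log above.
        conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);     // file-selection ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```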
2024-12-08T05:53:16,038 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(2659): reportForDuty to master=0d942cb2025d,39335,1733637195753 with port=45399, startcode=1733637195806 2024-12-08T05:53:16,038 DEBUG [RS:0;0d942cb2025d:45399 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T05:53:16,040 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52553, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T05:53:16,040 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39335 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,041 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39335 {}] master.ServerManager(517): Registering regionserver=0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,042 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b 2024-12-08T05:53:16,042 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37309 2024-12-08T05:53:16,042 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T05:53:16,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:53:16,045 DEBUG [RS:0;0d942cb2025d:45399 {}] zookeeper.ZKUtil(111): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,045 WARN [RS:0;0d942cb2025d:45399 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T05:53:16,045 INFO [RS:0;0d942cb2025d:45399 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:53:16,045 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,045 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [0d942cb2025d,45399,1733637195806] 2024-12-08T05:53:16,049 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T05:53:16,051 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T05:53:16,051 INFO [RS:0;0d942cb2025d:45399 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T05:53:16,051 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
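The MemStoreFlusher entry above shows globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit. As a hedged illustration (the heap size below is hypothetical, not read from this run), those two numbers typically follow from the hbase.regionserver.global.memstore.size fraction of the region server heap and its lower-limit fraction:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores (default 0.4),
        // and the low-water mark as a fraction of that limit (default 0.95),
        // which matches the 880 M / 836 M pair in the log above.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

        double heapMb = 2200; // hypothetical heap size, chosen only to reproduce the arithmetic
        double limit = heapMb * conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
        double lowMark = limit * conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", limit, lowMark);
    }
}
```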
2024-12-08T05:53:16,051 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T05:53:16,052 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T05:53:16,052 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/0d942cb2025d:0, corePoolSize=2, maxPoolSize=2 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,052 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,053 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/0d942cb2025d:0, corePoolSize=1, maxPoolSize=1 2024-12-08T05:53:16,053 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:53:16,053 DEBUG [RS:0;0d942cb2025d:45399 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/0d942cb2025d:0, corePoolSize=3, maxPoolSize=3 2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,053 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45399,1733637195806-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:53:16,067 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T05:53:16,067 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,45399,1733637195806-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,067 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,067 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.Replication(171): 0d942cb2025d,45399,1733637195806 started 2024-12-08T05:53:16,080 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,080 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1482): Serving as 0d942cb2025d,45399,1733637195806, RpcServer on 0d942cb2025d/172.17.0.2:45399, sessionid=0x10190a232660001 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45399,1733637195806' 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '0d942cb2025d,45399,1733637195806' 2024-12-08T05:53:16,081 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T05:53:16,082 DEBUG 
[RS:0;0d942cb2025d:45399 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T05:53:16,082 DEBUG [RS:0;0d942cb2025d:45399 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T05:53:16,082 INFO [RS:0;0d942cb2025d:45399 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T05:53:16,082 INFO [RS:0;0d942cb2025d:45399 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T05:53:16,160 WARN [0d942cb2025d:39335 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-08T05:53:16,184 INFO [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C45399%2C1733637195806, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/0d942cb2025d,45399,1733637195806, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs, maxLogs=32 2024-12-08T05:53:16,184 INFO [RS:0;0d942cb2025d:45399 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C45399%2C1733637195806.1733637196184 2024-12-08T05:53:16,190 INFO [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/0d942cb2025d,45399,1733637195806/0d942cb2025d%2C45399%2C1733637195806.1733637196184 2024-12-08T05:53:16,192 DEBUG [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45081:45081),(127.0.0.1/127.0.0.1:38881:38881)] 2024-12-08T05:53:16,410 DEBUG [0d942cb2025d:39335 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T05:53:16,411 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,412 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,45399,1733637195806, state=OPENING 2024-12-08T05:53:16,413 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T05:53:16,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:16,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:16,415 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T05:53:16,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:53:16,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:53:16,415 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,45399,1733637195806}] 2024-12-08T05:53:16,568 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T05:53:16,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57453, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T05:53:16,573 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T05:53:16,573 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:53:16,574 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=0d942cb2025d%2C45399%2C1733637195806.meta, suffix=.meta, logDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/0d942cb2025d,45399,1733637195806, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs, maxLogs=32 2024-12-08T05:53:16,574 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 0d942cb2025d%2C45399%2C1733637195806.meta.1733637196574.meta 2024-12-08T05:53:16,579 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/0d942cb2025d,45399,1733637195806/0d942cb2025d%2C45399%2C1733637195806.meta.1733637196574.meta 2024-12-08T05:53:16,584 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45081:45081),(127.0.0.1/127.0.0.1:38881:38881)] 2024-12-08T05:53:16,588 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T05:53:16,589 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
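The WAL configuration entries above (FSHLogProvider, blocksize=256 MB, rollsize=128 MB, maxLogs=32) describe how the file-system WAL provider sizes and rolls its log files. A minimal sketch of the settings usually behind those numbers, assuming the hbase.wal.provider and hbase.regionserver.hlog/logroll keys; exact defaults vary by version, so treat this as illustrative rather than the test's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                  // FSHLogProvider, as logged above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20); // 256 MB WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // roll at 50% of blocksize => 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                 // matches maxLogs=32 above

        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
                * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll size = " + (rollSize >> 20) + " MB");
    }
}
```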
2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T05:53:16,589 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T05:53:16,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T05:53:16,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T05:53:16,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:16,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:16,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T05:53:16,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T05:53:16,592 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:16,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:16,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T05:53:16,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T05:53:16,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:16,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T05:53:16,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T05:53:16,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T05:53:16,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T05:53:16,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-08T05:53:16,594 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T05:53:16,595 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740 2024-12-08T05:53:16,595 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740 2024-12-08T05:53:16,597 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T05:53:16,597 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T05:53:16,597 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T05:53:16,598 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T05:53:16,599 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794085, jitterRate=0.00973173975944519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T05:53:16,599 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T05:53:16,599 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733637196589Writing region info on filesystem at 1733637196589Initializing all the Stores at 1733637196590 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637196590Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637196590Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733637196590Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733637196590Cleaning up temporary data from old regions at 1733637196597 (+7 ms)Running coprocessor post-open hooks at 1733637196599 (+2 ms)Region opened successfully at 1733637196599 2024-12-08T05:53:16,600 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733637196567 2024-12-08T05:53:16,602 DEBUG [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T05:53:16,602 INFO [RS_OPEN_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T05:53:16,602 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,603 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 0d942cb2025d,45399,1733637195806, state=OPEN 2024-12-08T05:53:16,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:53:16,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T05:53:16,612 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,612 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:53:16,612 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T05:53:16,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T05:53:16,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=0d942cb2025d,45399,1733637195806 in 197 msec 2024-12-08T05:53:16,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T05:53:16,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-12-08T05:53:16,616 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T05:53:16,616 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T05:53:16,618 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:53:16,618 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,45399,1733637195806, seqNum=-1] 2024-12-08T05:53:16,618 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:53:16,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:53:16,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-12-08T05:53:16,624 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733637196624, completionTime=-1 2024-12-08T05:53:16,624 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T05:53:16,624 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T05:53:16,626 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T05:53:16,626 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733637256626 2024-12-08T05:53:16,626 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733637316626 2024-12-08T05:53:16,626 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-08T05:53:16,626 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,627 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,627 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,627 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-0d942cb2025d:39335, period=300000, unit=MILLISECONDS is enabled. 
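InitMetaProcedure above creates the 'default' and 'hbase' namespaces as part of first-time master startup. As a hedged sketch of how a client could confirm them once the cluster is serving, using the standard Admin API (the class name and connection details here are hypothetical, not part of this run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
    public static void main(String[] args) throws Exception {
        // Expects hbase-site.xml with the ZooKeeper quorum on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println(ns.getName()); // prints "default" and "hbase" on a fresh cluster
            }
        }
    }
}
```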
2024-12-08T05:53:16,627 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,627 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T05:53:16,628 DEBUG [master/0d942cb2025d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.791sec 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T05:53:16,630 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T05:53:16,632 DEBUG [master/0d942cb2025d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T05:53:16,633 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T05:53:16,633 INFO [master/0d942cb2025d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=0d942cb2025d,39335,1733637195753-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T05:53:16,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14800663, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:53:16,723 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 0d942cb2025d,39335,-1 for getting cluster id 2024-12-08T05:53:16,723 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T05:53:16,724 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6073ac3b-bacc-42b1-b91a-63ddda62cc98' 2024-12-08T05:53:16,724 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T05:53:16,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6073ac3b-bacc-42b1-b91a-63ddda62cc98" 2024-12-08T05:53:16,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34e21b3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:53:16,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [0d942cb2025d,39335,-1] 2024-12-08T05:53:16,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T05:53:16,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,727 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T05:53:16,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@428decd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T05:53:16,728 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T05:53:16,729 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=0d942cb2025d,45399,1733637195806, seqNum=-1] 2024-12-08T05:53:16,729 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T05:53:16,730 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51970, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T05:53:16,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=0d942cb2025d,39335,1733637195753 2024-12-08T05:53:16,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T05:53:16,734 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T05:53:16,734 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-08T05:53:16,736 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs, maxLogs=32 2024-12-08T05:53:16,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733637196736 2024-12-08T05:53:16,740 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1/test.com%2C8080%2C1.1733637196736 2024-12-08T05:53:16,748 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38881:38881),(127.0.0.1/127.0.0.1:45081:45081)] 2024-12-08T05:53:16,748 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733637196748 2024-12-08T05:53:16,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,753 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1/test.com%2C8080%2C1.1733637196736 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1/test.com%2C8080%2C1.1733637196748 2024-12-08T05:53:16,754 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38881:38881),(127.0.0.1/127.0.0.1:45081:45081)] 2024-12-08T05:53:16,754 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1/test.com%2C8080%2C1.1733637196736 is not closed yet, will try archiving it next time 2024-12-08T05:53:16,754 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,754 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,755 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,755 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741835_1011 (size=93) 2024-12-08T05:53:16,755 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741835_1011 (size=93) 2024-12-08T05:53:16,756 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/WALs/test.com,8080,1/test.com%2C8080%2C1.1733637196736 to hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs/test.com%2C8080%2C1.1733637196736 2024-12-08T05:53:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741836_1012 (size=93) 2024-12-08T05:53:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741836_1012 (size=93) 2024-12-08T05:53:16,758 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs 2024-12-08T05:53:16,758 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733637196748) 2024-12-08T05:53:16,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T05:53:16,758 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T05:53:16,758 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:16,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,759 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-08T05:53:16,759 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T05:53:16,759 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1362842911, stopped=false 2024-12-08T05:53:16,759 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=0d942cb2025d,39335,1733637195753 2024-12-08T05:53:16,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:16,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T05:53:16,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:16,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:16,760 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:53:16,760 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T05:53:16,760 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:16,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,761 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '0d942cb2025d,45399,1733637195806' ***** 2024-12-08T05:53:16,761 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T05:53:16,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T05:53:16,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(959): stopping server 0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;0d942cb2025d:45399. 2024-12-08T05:53:16,761 DEBUG [RS:0;0d942cb2025d:45399 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T05:53:16,761 DEBUG [RS:0;0d942cb2025d:45399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T05:53:16,761 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T05:53:16,762 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T05:53:16,762 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T05:53:16,762 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T05:53:16,762 DEBUG [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T05:53:16,762 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T05:53:16,762 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T05:53:16,762 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T05:53:16,762 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T05:53:16,762 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T05:53:16,762 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T05:53:16,777 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/.tmp/ns/f99a7d22ca064aec8ce5316f18517974 is 43, key is default/ns:d/1733637196620/Put/seqid=0 2024-12-08T05:53:16,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36443,1733637025646/0d942cb2025d%2C36443%2C1733637025646.1733637025862 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-08T05:53:16,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46561/user/jenkins/test-data/4b279590-ed83-d3a9-06fe-09bbbfdc8b10/WALs/0d942cb2025d,36989,1733637024575/0d942cb2025d%2C36989%2C1733637024575.meta.1733637025501.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-08T05:53:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741837_1013 (size=5153) 2024-12-08T05:53:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741837_1013 (size=5153) 2024-12-08T05:53:16,782 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/.tmp/ns/f99a7d22ca064aec8ce5316f18517974 2024-12-08T05:53:16,786 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/.tmp/ns/f99a7d22ca064aec8ce5316f18517974 as hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/ns/f99a7d22ca064aec8ce5316f18517974 2024-12-08T05:53:16,790 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/ns/f99a7d22ca064aec8ce5316f18517974, entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T05:53:16,791 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-12-08T05:53:16,794 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T05:53:16,794 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T05:53:16,794 INFO [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T05:53:16,795 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733637196762Running coprocessor pre-close hooks at 1733637196762Disabling compacts and flushes for region at 1733637196762Disabling writes for close at 1733637196762Obtaining lock to block concurrent updates at 1733637196762Preparing flush snapshotting stores in 1588230740 at 1733637196762Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733637196762Flushing stores of hbase:meta,,1.1588230740 at 1733637196763 (+1 ms)Flushing 1588230740/ns: creating writer at 1733637196763Flushing 1588230740/ns: appending metadata at 1733637196777 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733637196777Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36fa691c: reopening flushed file at 1733637196786 (+9 ms)Finished flush of dataSize ~74 
B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1733637196791 (+5 ms)Writing region close event to WAL at 1733637196791Running coprocessor post-close hooks at 1733637196794 (+3 ms)Closed at 1733637196794 2024-12-08T05:53:16,795 DEBUG [RS_CLOSE_META-regionserver/0d942cb2025d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T05:53:16,962 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(976): stopping server 0d942cb2025d,45399,1733637195806; all regions closed. 2024-12-08T05:53:16,962 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,963 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741834_1010 (size=1152) 2024-12-08T05:53:16,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741834_1010 (size=1152) 2024-12-08T05:53:16,967 DEBUG [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs 2024-12-08T05:53:16,967 INFO [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C45399%2C1733637195806.meta:.meta(num 1733637196574) 2024-12-08T05:53:16,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,968 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:16,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741833_1009 (size=93) 2024-12-08T05:53:16,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741833_1009 (size=93) 2024-12-08T05:53:16,971 DEBUG [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/oldWALs 2024-12-08T05:53:16,971 INFO [RS:0;0d942cb2025d:45399 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 0d942cb2025d%2C45399%2C1733637195806:(num 1733637196184) 2024-12-08T05:53:16,971 DEBUG [RS:0;0d942cb2025d:45399 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T05:53:16,971 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T05:53:16,971 INFO [RS:0;0d942cb2025d:45399 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:53:16,972 INFO [RS:0;0d942cb2025d:45399 {}] hbase.ChoreService(370): Chore service for: regionserver/0d942cb2025d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-08T05:53:16,972 INFO 
[RS:0;0d942cb2025d:45399 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:53:16,972 INFO [regionserver/0d942cb2025d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:53:16,972 INFO [RS:0;0d942cb2025d:45399 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45399 2024-12-08T05:53:16,974 INFO [RS:0;0d942cb2025d:45399 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:53:16,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T05:53:16,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/0d942cb2025d,45399,1733637195806 2024-12-08T05:53:16,975 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [0d942cb2025d,45399,1733637195806] 2024-12-08T05:53:16,977 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/0d942cb2025d,45399,1733637195806 already deleted, retry=false 2024-12-08T05:53:16,977 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 0d942cb2025d,45399,1733637195806 expired; onlineServers=0 2024-12-08T05:53:16,977 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '0d942cb2025d,39335,1733637195753' ***** 2024-12-08T05:53:16,977 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T05:53:16,977 INFO [M:0;0d942cb2025d:39335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T05:53:16,977 INFO [M:0;0d942cb2025d:39335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T05:53:16,977 DEBUG [M:0;0d942cb2025d:39335 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T05:53:16,977 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T05:53:16,977 DEBUG [M:0;0d942cb2025d:39335 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T05:53:16,977 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637195972 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.large.0-1733637195972,5,FailOnTimeoutGroup] 2024-12-08T05:53:16,977 DEBUG [master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637195973 {}] cleaner.HFileCleaner(306): Exit Thread[master/0d942cb2025d:0:becomeActiveMaster-HFileCleaner.small.0-1733637195973,5,FailOnTimeoutGroup] 2024-12-08T05:53:16,978 INFO [M:0;0d942cb2025d:39335 {}] hbase.ChoreService(370): Chore service for: master/0d942cb2025d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T05:53:16,978 INFO [M:0;0d942cb2025d:39335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T05:53:16,978 DEBUG [M:0;0d942cb2025d:39335 {}] master.HMaster(1795): Stopping service threads 2024-12-08T05:53:16,978 INFO [M:0;0d942cb2025d:39335 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T05:53:16,978 INFO [M:0;0d942cb2025d:39335 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T05:53:16,978 INFO [M:0;0d942cb2025d:39335 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T05:53:16,978 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T05:53:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T05:53:16,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T05:53:16,979 DEBUG [M:0;0d942cb2025d:39335 {}] zookeeper.ZKUtil(347): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T05:53:16,979 WARN [M:0;0d942cb2025d:39335 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T05:53:16,979 INFO [M:0;0d942cb2025d:39335 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/.lastflushedseqids 2024-12-08T05:53:16,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741838_1014 (size=99) 2024-12-08T05:53:16,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741838_1014 (size=99) 2024-12-08T05:53:16,984 INFO [M:0;0d942cb2025d:39335 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T05:53:16,984 INFO [M:0;0d942cb2025d:39335 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T05:53:16,985 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T05:53:16,985 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:16,985 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:16,985 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T05:53:16,985 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:16,985 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-08T05:53:17,000 DEBUG [M:0;0d942cb2025d:39335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2af20d8bdbe4461bcf1a9b7d17f5e4a is 82, key is hbase:meta,,1/info:regioninfo/1733637196602/Put/seqid=0 2024-12-08T05:53:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741839_1015 (size=5672) 2024-12-08T05:53:17,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741839_1015 (size=5672) 2024-12-08T05:53:17,005 INFO [M:0;0d942cb2025d:39335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2af20d8bdbe4461bcf1a9b7d17f5e4a 2024-12-08T05:53:17,024 DEBUG [M:0;0d942cb2025d:39335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d46462cbeb284d6abd48d97da8bc2496 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733637196624/Put/seqid=0 2024-12-08T05:53:17,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741840_1016 (size=5275) 2024-12-08T05:53:17,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741840_1016 (size=5275) 2024-12-08T05:53:17,029 INFO [M:0;0d942cb2025d:39335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d46462cbeb284d6abd48d97da8bc2496 2024-12-08T05:53:17,047 DEBUG [M:0;0d942cb2025d:39335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5689444f6207483fbdeadce79968678e is 69, key is 0d942cb2025d,45399,1733637195806/rs:state/1733637196041/Put/seqid=0 2024-12-08T05:53:17,051 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741841_1017 (size=5156) 2024-12-08T05:53:17,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741841_1017 (size=5156) 2024-12-08T05:53:17,052 INFO [M:0;0d942cb2025d:39335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5689444f6207483fbdeadce79968678e 2024-12-08T05:53:17,070 DEBUG [M:0;0d942cb2025d:39335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62ecffa3d5444b6c8a2b93fa2a0e782a is 52, key is load_balancer_on/state:d/1733637196733/Put/seqid=0 2024-12-08T05:53:17,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741842_1018 (size=5056) 2024-12-08T05:53:17,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741842_1018 (size=5056) 2024-12-08T05:53:17,074 INFO [M:0;0d942cb2025d:39335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62ecffa3d5444b6c8a2b93fa2a0e782a 2024-12-08T05:53:17,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:17,075 INFO [RS:0;0d942cb2025d:45399 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:53:17,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45399-0x10190a232660001, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:17,075 INFO [RS:0;0d942cb2025d:45399 {}] regionserver.HRegionServer(1031): Exiting; stopping=0d942cb2025d,45399,1733637195806; zookeeper connection closed. 
2024-12-08T05:53:17,076 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b4ec744 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b4ec744 2024-12-08T05:53:17,076 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T05:53:17,079 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2af20d8bdbe4461bcf1a9b7d17f5e4a as hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c2af20d8bdbe4461bcf1a9b7d17f5e4a 2024-12-08T05:53:17,082 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c2af20d8bdbe4461bcf1a9b7d17f5e4a, entries=8, sequenceid=29, filesize=5.5 K 2024-12-08T05:53:17,083 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d46462cbeb284d6abd48d97da8bc2496 as hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d46462cbeb284d6abd48d97da8bc2496 2024-12-08T05:53:17,086 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d46462cbeb284d6abd48d97da8bc2496, entries=3, sequenceid=29, filesize=5.2 K 2024-12-08T05:53:17,087 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5689444f6207483fbdeadce79968678e as hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5689444f6207483fbdeadce79968678e 2024-12-08T05:53:17,090 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5689444f6207483fbdeadce79968678e, entries=1, sequenceid=29, filesize=5.0 K 2024-12-08T05:53:17,091 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/62ecffa3d5444b6c8a2b93fa2a0e782a as hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/62ecffa3d5444b6c8a2b93fa2a0e782a 2024-12-08T05:53:17,094 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37309/user/jenkins/test-data/4eb4778b-3986-bcd9-2976-07bfec878a2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/62ecffa3d5444b6c8a2b93fa2a0e782a, entries=1, sequenceid=29, filesize=4.9 K 2024-12-08T05:53:17,095 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-12-08T05:53:17,096 INFO [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T05:53:17,096 DEBUG [M:0;0d942cb2025d:39335 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733637196984Disabling compacts and flushes for region at 1733637196984Disabling writes for close at 1733637196985 (+1 ms)Obtaining lock to block concurrent updates at 1733637196985Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733637196985Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733637196985Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733637196986 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733637196986Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733637197000 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733637197000Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733637197010 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733637197024 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733637197024Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733637197033 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733637197047 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733637197047Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733637197056 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733637197069 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733637197069Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d4d317d: reopening flushed file at 1733637197078 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48c0e339: reopening flushed file at 1733637197082 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16e2bd2a: reopening flushed file at 1733637197086 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bd2db29: reopening flushed file at 1733637197090 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1733637197095 (+5 ms)Writing region close event to WAL at 1733637197096 (+1 ms)Closed at 1733637197096 2024-12-08T05:53:17,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:17,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:17,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:17,097 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-08T05:53:17,097 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-08T05:53:17,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46063 is added to blk_1073741830_1006 (size=10311) 2024-12-08T05:53:17,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42463 is added to blk_1073741830_1006 (size=10311) 2024-12-08T05:53:17,100 INFO [M:0;0d942cb2025d:39335 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T05:53:17,100 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T05:53:17,100 INFO [M:0;0d942cb2025d:39335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39335 2024-12-08T05:53:17,100 INFO [M:0;0d942cb2025d:39335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T05:53:17,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:17,202 INFO [M:0;0d942cb2025d:39335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T05:53:17,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39335-0x10190a232660000, quorum=127.0.0.1:54003, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T05:53:17,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34ce23e2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:17,204 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a232eac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:17,204 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:17,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4cf04c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:17,205 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d5d1a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:17,206 WARN [BP-1214476892-172.17.0.2-1733637195085 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:53:17,206 WARN [BP-1214476892-172.17.0.2-1733637195085 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214476892-172.17.0.2-1733637195085 (Datanode Uuid 8cdc78c3-7208-460d-b26f-82abaf6194f8) service to localhost/127.0.0.1:37309 2024-12-08T05:53:17,206 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:53:17,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:53:17,206 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data3/current/BP-1214476892-172.17.0.2-1733637195085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:17,207 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data4/current/BP-1214476892-172.17.0.2-1733637195085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:17,207 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:53:17,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ca0473d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T05:53:17,209 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63f95535{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:17,209 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:17,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2628bbf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:17,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fd2bdde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:17,210 WARN [BP-1214476892-172.17.0.2-1733637195085 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T05:53:17,210 WARN [BP-1214476892-172.17.0.2-1733637195085 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1214476892-172.17.0.2-1733637195085 (Datanode Uuid 39915b98-90ef-467f-a124-1421cf77403e) service to localhost/127.0.0.1:37309 2024-12-08T05:53:17,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data2/current/BP-1214476892-172.17.0.2-1733637195085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:17,211 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T05:53:17,211 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T05:53:17,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/cluster_26be5350-22b6-5c9e-366c-be4db198b576/data/data1/current/BP-1214476892-172.17.0.2-1733637195085 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T05:53:17,211 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T05:53:17,216 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3665bd5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T05:53:17,217 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43ab2d8e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T05:53:17,217 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T05:53:17,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3670d9ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T05:53:17,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f551216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/56e147b6-47fb-9016-83f7-441745635da6/hadoop.log.dir/,STOPPED} 2024-12-08T05:53:17,223 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T05:53:17,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T05:53:17,246 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 232) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:37309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:37309
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:37309
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:37309 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:37309 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: globalEventExecutor-1-20
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=541 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=45 (was 45), ProcessCount=11 (was 11), AvailableMemoryMB=7496 (was 7509)