2024-11-10 12:58:47,728 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-10 12:58:47,744 main DEBUG Took 0.013995 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-10 12:58:47,745 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-10 12:58:47,745 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-10 12:58:47,746 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-10 12:58:47,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,756 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-10 12:58:47,767 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,768 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,769 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,769 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,770 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,770 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,771 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,771 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,771 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,772 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,772 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,773 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,773 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,773 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-10 12:58:47,774 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,774 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,774 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,775 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,775 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,775 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,776 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,776 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,776 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-10 12:58:47,777 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,777 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-10 12:58:47,779 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-10 12:58:47,780 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-10 12:58:47,781 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-10 12:58:47,782 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-10 12:58:47,783 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-10 12:58:47,783 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-10 12:58:47,791 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-10 12:58:47,793 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-10 12:58:47,795 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-10 12:58:47,795 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-10 12:58:47,796 main DEBUG createAppenders(={Console}) 2024-11-10 12:58:47,796 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-10 12:58:47,797 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-10 12:58:47,797 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-10 12:58:47,797 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-10 12:58:47,798 main DEBUG OutputStream closed 2024-11-10 12:58:47,798 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-10 12:58:47,798 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-10 12:58:47,798 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-10 12:58:47,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-10 12:58:47,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-10 12:58:47,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-10 12:58:47,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-10 12:58:47,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-10 12:58:47,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-10 12:58:47,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-10 12:58:47,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-10 12:58:47,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-10 12:58:47,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-10 12:58:47,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-10 12:58:47,871 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-10 12:58:47,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-10 12:58:47,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-10 12:58:47,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-10 12:58:47,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-10 12:58:47,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-10 12:58:47,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-10 12:58:47,876 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-10 12:58:47,876 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-10 12:58:47,876 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-10 12:58:47,877 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-10T12:58:48,118 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a 2024-11-10 12:58:48,121 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-10 12:58:48,121 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-10T12:58:48,131 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-10T12:58:48,163 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=2, ProcessCount=11, AvailableMemoryMB=9661 2024-11-10T12:58:48,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T12:58:48,180 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac, deleteOnExit=true 2024-11-10T12:58:48,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T12:58:48,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/test.cache.data in system properties and HBase conf 2024-11-10T12:58:48,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T12:58:48,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir in system properties and HBase conf 2024-11-10T12:58:48,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T12:58:48,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T12:58:48,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T12:58:48,266 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-10T12:58:48,359 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T12:58:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T12:58:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T12:58:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T12:58:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T12:58:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T12:58:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T12:58:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T12:58:48,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T12:58:48,366 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T12:58:48,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/nfs.dump.dir in system properties and HBase conf 2024-11-10T12:58:48,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/java.io.tmpdir in system properties and HBase conf 2024-11-10T12:58:48,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T12:58:48,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T12:58:48,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T12:58:48,838 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T12:58:49,190 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-10T12:58:49,275 INFO [Time-limited test {}] log.Log(170): Logging initialized @2245ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-10T12:58:49,352 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T12:58:49,417 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T12:58:49,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T12:58:49,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T12:58:49,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T12:58:49,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T12:58:49,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,AVAILABLE} 2024-11-10T12:58:49,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T12:58:49,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/java.io.tmpdir/jetty-localhost-39191-hadoop-hdfs-3_4_1-tests_jar-_-any-16577243824258411177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T12:58:49,654 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:39191} 2024-11-10T12:58:49,654 INFO [Time-limited test {}] server.Server(415): Started @2625ms 2024-11-10T12:58:49,683 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T12:58:50,042 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T12:58:50,049 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T12:58:50,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T12:58:50,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T12:58:50,051 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T12:58:50,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,AVAILABLE} 2024-11-10T12:58:50,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T12:58:50,176 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/java.io.tmpdir/jetty-localhost-41529-hadoop-hdfs-3_4_1-tests_jar-_-any-8511837749407410260/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T12:58:50,177 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:41529} 2024-11-10T12:58:50,177 INFO [Time-limited test {}] server.Server(415): Started @3148ms 2024-11-10T12:58:50,235 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T12:58:50,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T12:58:50,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T12:58:50,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T12:58:50,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T12:58:50,372 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T12:58:50,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,AVAILABLE} 2024-11-10T12:58:50,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T12:58:50,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/java.io.tmpdir/jetty-localhost-37091-hadoop-hdfs-3_4_1-tests_jar-_-any-15139031333710080675/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T12:58:50,516 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:37091} 2024-11-10T12:58:50,516 INFO [Time-limited test {}] server.Server(415): Started @3487ms 2024-11-10T12:58:50,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T12:58:50,679 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data1/current/BP-1773734076-172.17.0.2-1731243528935/current, will proceed with Du for space computation calculation, 2024-11-10T12:58:50,679 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data2/current/BP-1773734076-172.17.0.2-1731243528935/current, will proceed with Du for space computation calculation, 2024-11-10T12:58:50,679 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data3/current/BP-1773734076-172.17.0.2-1731243528935/current, will proceed with Du for space computation calculation, 2024-11-10T12:58:50,679 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data4/current/BP-1773734076-172.17.0.2-1731243528935/current, will proceed with Du for space computation calculation, 2024-11-10T12:58:50,741 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T12:58:50,742 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T12:58:50,810 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9098fd7f5667a2 with lease ID 0xeadcff86418483cb: Processing first storage report for DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6 from datanode DatanodeRegistration(127.0.0.1:42169, datanodeUuid=1b9dd577-f21d-4c0f-8c84-741814eaa716, infoPort=36941, infoSecurePort=0, ipcPort=41261, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935) 2024-11-10T12:58:50,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9098fd7f5667a2 with lease ID 0xeadcff86418483cb: from storage DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6 node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=1b9dd577-f21d-4c0f-8c84-741814eaa716, infoPort=36941, infoSecurePort=0, ipcPort=41261, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-10T12:58:50,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a9e9a3f3cf26a36 with lease ID 0xeadcff86418483cc: Processing first storage report for DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f from datanode DatanodeRegistration(127.0.0.1:36393, datanodeUuid=56e70fc3-2b8b-43a9-b7be-a39851e441ff, infoPort=36027, infoSecurePort=0, ipcPort=45299, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935) 2024-11-10T12:58:50,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a9e9a3f3cf26a36 with lease ID 0xeadcff86418483cc: from storage DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f node DatanodeRegistration(127.0.0.1:36393, datanodeUuid=56e70fc3-2b8b-43a9-b7be-a39851e441ff, infoPort=36027, infoSecurePort=0, ipcPort=45299, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T12:58:50,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9098fd7f5667a2 with lease ID 0xeadcff86418483cb: Processing first storage report for DS-917c48d3-17c8-4fd6-8407-8713dce2db9c from datanode DatanodeRegistration(127.0.0.1:42169, datanodeUuid=1b9dd577-f21d-4c0f-8c84-741814eaa716, infoPort=36941, infoSecurePort=0, ipcPort=41261, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935) 2024-11-10T12:58:50,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9098fd7f5667a2 with lease ID 0xeadcff86418483cb: from storage DS-917c48d3-17c8-4fd6-8407-8713dce2db9c node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=1b9dd577-f21d-4c0f-8c84-741814eaa716, infoPort=36941, infoSecurePort=0, ipcPort=41261, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T12:58:50,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a9e9a3f3cf26a36 with lease ID 0xeadcff86418483cc: Processing first storage report for DS-63ca097e-4dcd-4347-8144-f23d0a5ded71 from datanode DatanodeRegistration(127.0.0.1:36393, datanodeUuid=56e70fc3-2b8b-43a9-b7be-a39851e441ff, infoPort=36027, infoSecurePort=0, ipcPort=45299, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935) 2024-11-10T12:58:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x1a9e9a3f3cf26a36 with lease ID 0xeadcff86418483cc: from storage DS-63ca097e-4dcd-4347-8144-f23d0a5ded71 node DatanodeRegistration(127.0.0.1:36393, datanodeUuid=56e70fc3-2b8b-43a9-b7be-a39851e441ff, infoPort=36027, infoSecurePort=0, ipcPort=45299, storageInfo=lv=-57;cid=testClusterID;nsid=585065306;c=1731243528935), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T12:58:50,944 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a 2024-11-10T12:58:51,021 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/zookeeper_0, clientPort=59930, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T12:58:51,033 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59930 2024-11-10T12:58:51,047 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:51,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:51,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741825_1001 (size=7) 2024-11-10T12:58:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741825_1001 (size=7) 2024-11-10T12:58:51,714 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb with version=8 2024-11-10T12:58:51,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T12:58:51,808 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-10T12:58:52,061 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T12:58:52,072 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,078 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T12:58:52,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T12:58:52,251 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T12:58:52,313 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-10T12:58:52,322 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-10T12:58:52,326 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T12:58:52,355 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19825 (auto-detected) 2024-11-10T12:58:52,357 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-10T12:58:52,376 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43891 2024-11-10T12:58:52,398 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43891 connecting to ZooKeeper ensemble=127.0.0.1:59930 2024-11-10T12:58:52,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:438910x0, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T12:58:52,433 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43891-0x10101f5c5890000 connected 2024-11-10T12:58:52,461 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:52,464 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:52,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T12:58:52,481 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb, hbase.cluster.distributed=false 2024-11-10T12:58:52,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T12:58:52,511 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43891 2024-11-10T12:58:52,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43891 2024-11-10T12:58:52,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43891 2024-11-10T12:58:52,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43891 2024-11-10T12:58:52,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43891 2024-11-10T12:58:52,633 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T12:58:52,635 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,636 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,636 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T12:58:52,636 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T12:58:52,636 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T12:58:52,640 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T12:58:52,643 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T12:58:52,644 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35149 2024-11-10T12:58:52,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35149 connecting to ZooKeeper ensemble=127.0.0.1:59930 2024-11-10T12:58:52,648 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:52,653 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:52,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351490x0, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T12:58:52,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35149-0x10101f5c5890001 connected 2024-11-10T12:58:52,662 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T12:58:52,667 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T12:58:52,674 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T12:58:52,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T12:58:52,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T12:58:52,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35149 2024-11-10T12:58:52,682 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35149 2024-11-10T12:58:52,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35149 2024-11-10T12:58:52,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35149 2024-11-10T12:58:52,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35149 2024-11-10T12:58:52,708 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:43891 2024-11-10T12:58:52,709 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,43891,1731243531861 2024-11-10T12:58:52,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T12:58:52,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T12:58:52,721 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,43891,1731243531861 2024-11-10T12:58:52,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T12:58:52,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:52,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-10T12:58:52,745 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T12:58:52,746 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,43891,1731243531861 from backup master directory 2024-11-10T12:58:52,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T12:58:52,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,43891,1731243531861 2024-11-10T12:58:52,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T12:58:52,751 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T12:58:52,751 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,43891,1731243531861 2024-11-10T12:58:52,754 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-10T12:58:52,755 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-10T12:58:52,823 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase.id] with ID: 83f15e7a-e9ad-4c80-b7d4-2537a9c8578f 2024-11-10T12:58:52,823 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/.tmp/hbase.id 2024-11-10T12:58:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741826_1002 (size=42) 2024-11-10T12:58:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741826_1002 (size=42) 2024-11-10T12:58:52,838 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/.tmp/hbase.id]:[hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase.id] 2024-11-10T12:58:52,888 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:52,892 INFO 
[master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T12:58:52,915 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-10T12:58:52,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:52,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:52,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741827_1003 (size=196) 2024-11-10T12:58:52,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741827_1003 (size=196) 2024-11-10T12:58:52,957 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T12:58:52,959 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T12:58:52,965 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T12:58:53,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741828_1004 (size=1189) 2024-11-10T12:58:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741828_1004 (size=1189) 2024-11-10T12:58:53,019 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store 2024-11-10T12:58:53,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741829_1005 (size=34) 2024-11-10T12:58:53,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741829_1005 (size=34) 2024-11-10T12:58:53,047 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-10T12:58:53,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:53,051 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T12:58:53,051 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T12:58:53,051 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T12:58:53,053 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T12:58:53,053 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T12:58:53,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T12:58:53,054 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243533051Disabling compacts and flushes for region at 1731243533051Disabling writes for close at 1731243533053 (+2 ms)Writing region close event to WAL at 1731243533053Closed at 1731243533053 2024-11-10T12:58:53,057 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/.initializing 2024-11-10T12:58:53,057 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/WALs/3857ccc89b65,43891,1731243531861 2024-11-10T12:58:53,079 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C43891%2C1731243531861, suffix=, logDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/WALs/3857ccc89b65,43891,1731243531861, archiveDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/oldWALs, maxLogs=10 2024-11-10T12:58:53,087 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43891%2C1731243531861.1731243533083 2024-11-10T12:58:53,108 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/WALs/3857ccc89b65,43891,1731243531861/3857ccc89b65%2C43891%2C1731243531861.1731243533083 2024-11-10T12:58:53,121 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36027:36027),(127.0.0.1/127.0.0.1:36941:36941)] 2024-11-10T12:58:53,122 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T12:58:53,123 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:53,126 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,127 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T12:58:53,196 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:53,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T12:58:53,203 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T12:58:53,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T12:58:53,206 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T12:58:53,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T12:58:53,210 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,211 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T12:58:53,212 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,216 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,217 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,223 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,223 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,227 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T12:58:53,230 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T12:58:53,235 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T12:58:53,236 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745389, jitterRate=-0.05218946933746338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T12:58:53,243 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243533140Initializing all the Stores at 1731243533142 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243533143 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243533143Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243533144 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243533144Cleaning up temporary data from old regions at 1731243533224 (+80 ms)Region opened successfully at 1731243533243 (+19 ms) 2024-11-10T12:58:53,245 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T12:58:53,282 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66f591de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T12:58:53,315 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T12:58:53,332 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T12:58:53,333 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T12:58:53,337 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T12:58:53,339 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-10T12:58:53,347 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-10T12:58:53,347 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T12:58:53,377 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T12:58:53,386 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T12:58:53,388 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T12:58:53,391 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T12:58:53,392 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T12:58:53,394 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T12:58:53,397 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T12:58:53,401 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T12:58:53,403 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T12:58:53,404 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T12:58:53,406 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T12:58:53,423 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T12:58:53,425 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T12:58:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T12:58:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T12:58:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,432 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,43891,1731243531861, sessionid=0x10101f5c5890000, setting cluster-up flag (Was=false) 2024-11-10T12:58:53,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,452 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T12:58:53,454 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,43891,1731243531861 2024-11-10T12:58:53,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:53,468 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T12:58:53,470 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,43891,1731243531861 2024-11-10T12:58:53,476 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T12:58:53,495 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(746): ClusterId : 83f15e7a-e9ad-4c80-b7d4-2537a9c8578f 2024-11-10T12:58:53,498 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T12:58:53,504 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T12:58:53,504 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T12:58:53,507 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T12:58:53,508 DEBUG [RS:0;3857ccc89b65:35149 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c9e693, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T12:58:53,530 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:35149 2024-11-10T12:58:53,534 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T12:58:53,534 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T12:58:53,535 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(832): About to register with Master. 
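Editor's note: the ZKWatcher(609) entries in this stretch record NodeCreated and NodeChildrenChanged events delivered for znodes under /hbase on the quorum 127.0.0.1:59930. A minimal sketch with the stock Apache ZooKeeper client (not HBase's ZKWatcher) that would observe the same kind of events; quorum and base znode are taken from the log and are specific to this test run.

```java
import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class HBaseZnodeWatcher {
  public static void main(String[] args)
      throws IOException, KeeperException, InterruptedException {
    // Quorum and base znode as printed in the log; both are test-run specific.
    String quorum = "127.0.0.1:59930";
    String baseZNode = "/hbase";

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, (WatchedEvent event) ->
        // Mirrors what ZKWatcher(609) logs: event type, state and path.
        System.out.printf("Received ZooKeeper Event, type=%s, state=%s, path=%s%n",
            event.getType(), event.getState(), event.getPath()));

    // Registering a child watch is what makes NodeChildrenChanged fire later.
    zk.getChildren(baseZNode, true);
    Thread.sleep(60_000); // keep the session alive long enough to observe events
    zk.close();
  }
}
```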
2024-11-10T12:58:53,538 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,43891,1731243531861 with port=35149, startcode=1731243532592 2024-11-10T12:58:53,553 DEBUG [RS:0;3857ccc89b65:35149 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T12:58:53,553 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T12:58:53,563 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T12:58:53,569 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T12:58:53,575 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,43891,1731243531861 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T12:58:53,582 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,583 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T12:58:53,583 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,588 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243563588 2024-11-10T12:58:53,589 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T12:58:53,589 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T12:58:53,590 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T12:58:53,591 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T12:58:53,594 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T12:58:53,595 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T12:58:53,595 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T12:58:53,595 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T12:58:53,597 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,597 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T12:58:53,596 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
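Editor's note: the balancer lines logged just above (BaseLoadBalancer slop=0.2; StochasticLoadBalancer with maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) are driven by configuration. A hedged sketch of the keys commonly documented for these values; the key names are from the HBase reference guide, not read from this run's config, and should be verified against the version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Matches "slop=0.2" from BaseLoadBalancer(416).
    conf.setFloat("hbase.regions.slop", 0.2f);
    // Matches the StochasticLoadBalancer(272) line: maxSteps, runMaxSteps,
    // stepsPerRegion and maxRunningTime.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    return conf;
  }
}
```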
2024-11-10T12:58:53,599 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T12:58:53,600 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T12:58:53,601 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T12:58:53,605 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T12:58:53,605 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T12:58:53,609 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243533607,5,FailOnTimeoutGroup] 2024-11-10T12:58:53,610 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243533609,5,FailOnTimeoutGroup] 2024-11-10T12:58:53,610 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,610 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T12:58:53,611 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,612 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
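Editor's note: two messages in this log name the exact keys that would change behaviour: StoreHotnessProtector(112), earlier, asks for hbase.region.store.parallel.put.limit > 0, and HMaster(1741) above asks for hbase.regions.recovery.store.file.ref.count > 0. A minimal sketch of setting them programmatically; the chosen values are arbitrary examples, not recommendations.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableOptionalProtections {
  public static Configuration enable() {
    Configuration conf = HBaseConfiguration.create();
    // From throttle.StoreHotnessProtector(112): any value > 0 enables the protector.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);          // example value
    // From master.HMaster(1741): a threshold > 0 enables reopening regions
    // whose store files accumulate too many references.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);   // example value
    return conf;
  }
}
```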
2024-11-10T12:58:53,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741831_1007 (size=1321) 2024-11-10T12:58:53,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741831_1007 (size=1321) 2024-11-10T12:58:53,616 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T12:58:53,617 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb 2024-11-10T12:58:53,633 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T12:58:53,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43891 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741832_1008 (size=32) 2024-11-10T12:58:53,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741832_1008 (size=32) 2024-11-10T12:58:53,643 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43891 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:53,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T12:58:53,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T12:58:53,658 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,658 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb 2024-11-10T12:58:53,658 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44081 2024-11-10T12:58:53,658 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T12:58:53,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:53,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T12:58:53,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T12:58:53,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T12:58:53,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:53,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T12:58:53,663 DEBUG [RS:0;3857ccc89b65:35149 {}] zookeeper.ZKUtil(111): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,664 WARN [RS:0;3857ccc89b65:35149 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T12:58:53,664 INFO [RS:0;3857ccc89b65:35149 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T12:58:53,664 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T12:58:53,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,666 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,35149,1731243532592] 2024-11-10T12:58:53,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:53,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T12:58:53,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T12:58:53,672 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:53,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:53,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T12:58:53,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740 2024-11-10T12:58:53,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740 2024-11-10T12:58:53,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T12:58:53,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T12:58:53,679 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
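Editor's note: the CompactionConfiguration(183) lines repeat the same knobs for every column family: minCompactSize 128 MB, files [minFilesToCompact:3, maxFilesToCompact:10), ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter. A sketch of the configuration keys these values are usually read from, per the HBase reference guide; verify the names for your version before relying on them.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration valuesSeenInLog() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}
```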
2024-11-10T12:58:53,682 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T12:58:53,685 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T12:58:53,686 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779190, jitterRate=-0.009209737181663513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T12:58:53,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243533645Initializing all the Stores at 1731243533647 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243533647Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243533654 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243533654Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243533655 (+1 ms)Cleaning up temporary data from old regions at 1731243533678 (+23 ms)Region opened successfully at 1731243533689 (+11 ms) 2024-11-10T12:58:53,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T12:58:53,690 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T12:58:53,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T12:58:53,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T12:58:53,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T12:58:53,692 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T12:58:53,692 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243533690Disabling compacts and flushes for region at 1731243533690Disabling writes for close at 
1731243533690Writing region close event to WAL at 1731243533691 (+1 ms)Closed at 1731243533692 (+1 ms) 2024-11-10T12:58:53,695 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T12:58:53,695 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T12:58:53,695 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T12:58:53,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T12:58:53,711 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T12:58:53,713 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T12:58:53,714 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T12:58:53,720 INFO [RS:0;3857ccc89b65:35149 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T12:58:53,720 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,721 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T12:58:53,729 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T12:58:53,730 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
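Editor's note: MemStoreFlusher(131) above reports globalMemStoreLimit=880 M with a low mark of 836 M. Assuming the commonly documented defaults (global limit = hbase.regionserver.global.memstore.size, 0.4 of the heap; low mark = hbase.regionserver.global.memstore.size.lower.limit, 0.95 of the limit), those numbers are self-consistent: 0.95 x 880 M = 836 M, and a 880 M limit would imply roughly a 2.2 GB heap for this test JVM. The heap size and defaults are inferred, not logged; the sketch just checks the arithmetic.

```java
public class MemStoreLimits {
  public static void main(String[] args) {
    // Assumed defaults: hbase.regionserver.global.memstore.size = 0.4,
    // hbase.regionserver.global.memstore.size.lower.limit = 0.95.
    double heapMb = 2200.0;                       // inferred from 880 / 0.4, not logged
    double globalLimitMb = heapMb * 0.4;          // 880 MB, matches the log
    double lowerMarkMb = globalLimitMb * 0.95;    // 836 MB, matches the log
    System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n",
        globalLimitMb, lowerMarkMb);
  }
}
```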
2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,731 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T12:58:53,732 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,732 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,732 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,732 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,732 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,733 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T12:58:53,733 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T12:58:53,733 DEBUG [RS:0;3857ccc89b65:35149 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,735 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,35149,1731243532592-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T12:58:53,754 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T12:58:53,756 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,35149,1731243532592-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,757 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,757 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.Replication(171): 3857ccc89b65,35149,1731243532592 started 2024-11-10T12:58:53,777 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:53,777 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,35149,1731243532592, RpcServer on 3857ccc89b65/172.17.0.2:35149, sessionid=0x10101f5c5890001 2024-11-10T12:58:53,778 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T12:58:53,778 DEBUG [RS:0;3857ccc89b65:35149 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,779 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,35149,1731243532592' 2024-11-10T12:58:53,779 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T12:58:53,780 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T12:58:53,781 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T12:58:53,781 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T12:58:53,781 DEBUG [RS:0;3857ccc89b65:35149 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,35149,1731243532592 2024-11-10T12:58:53,781 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,35149,1731243532592' 2024-11-10T12:58:53,781 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T12:58:53,782 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T12:58:53,783 DEBUG [RS:0;3857ccc89b65:35149 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T12:58:53,783 INFO [RS:0;3857ccc89b65:35149 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T12:58:53,783 INFO [RS:0;3857ccc89b65:35149 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-10T12:58:53,865 WARN [3857ccc89b65:43891 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T12:58:53,891 INFO [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C35149%2C1731243532592, suffix=, logDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592, archiveDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs, maxLogs=32 2024-11-10T12:58:53,894 INFO [RS:0;3857ccc89b65:35149 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243533894 2024-11-10T12:58:53,904 INFO [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243533894 2024-11-10T12:58:53,905 DEBUG [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T12:58:54,118 DEBUG [3857ccc89b65:43891 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T12:58:54,130 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,35149,1731243532592 2024-11-10T12:58:54,137 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,35149,1731243532592, state=OPENING 2024-11-10T12:58:54,143 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T12:58:54,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:54,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T12:58:54,145 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T12:58:54,145 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T12:58:54,147 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T12:58:54,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,35149,1731243532592}] 2024-11-10T12:58:54,324 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T12:58:54,328 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36363, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T12:58:54,342 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T12:58:54,343 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T12:58:54,347 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C35149%2C1731243532592.meta, suffix=.meta, logDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592, archiveDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs, maxLogs=32 2024-11-10T12:58:54,349 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.meta.1731243534349.meta 2024-11-10T12:58:54,358 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.meta.1731243534349.meta 2024-11-10T12:58:54,359 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36027:36027),(127.0.0.1/127.0.0.1:36941:36941)] 2024-11-10T12:58:54,360 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T12:58:54,363 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T12:58:54,366 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T12:58:54,371 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
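The meta region above picks up org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from its own table descriptor. For a user table the same mechanism is exposed through the public client API; a minimal sketch, assuming a reachable cluster and an illustrative table name that does not appear in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorOnDescriptorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Attach a coprocessor by class name, the same way hbase:meta carries
          // MultiRowMutationEndpoint in its descriptor; priority is left at the client default.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("ExampleTable"))   // illustrative name, not from the log
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build();
          admin.createTable(td);
        }
      }
    }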
2024-11-10T12:58:54,375 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T12:58:54,376 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:54,376 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T12:58:54,376 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T12:58:54,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T12:58:54,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T12:58:54,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:54,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:54,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T12:58:54,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T12:58:54,384 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:54,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:54,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T12:58:54,386 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T12:58:54,386 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:54,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T12:58:54,387 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T12:58:54,389 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T12:58:54,389 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:54,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
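Each store above is created with the same CompactionConfiguration: min 3 / max 10 files per compaction, ratio 1.2, off-peak ratio 5.0, a 604800000 ms (7-day) major compaction period with 0.5 jitter. Those figures correspond to the standard compaction keys; a minimal sketch that only maps the logged fields to key names (the values below are the defaults this log already reflects, and on a real cluster they belong in hbase-site.xml on the region servers rather than in client code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // files [minFilesToCompact:3, maxFilesToCompact:10) in the log
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ratio 1.200000 / off-peak ratio 5.000000
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // major period 604800000 ms, major jitter 0.500000
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }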
2024-11-10T12:58:54,390 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T12:58:54,391 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740 2024-11-10T12:58:54,393 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740 2024-11-10T12:58:54,396 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T12:58:54,396 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T12:58:54,397 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T12:58:54,400 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T12:58:54,401 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719791, jitterRate=-0.08473849296569824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T12:58:54,401 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T12:58:54,403 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243534377Writing region info on filesystem at 1731243534377Initializing all the Stores at 1731243534379 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243534379Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243534380 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243534380Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243534380Cleaning up temporary data from old regions at 1731243534396 (+16 ms)Running coprocessor post-open hooks at 1731243534401 (+5 ms)Region opened successfully at 1731243534403 (+2 ms) 2024-11-10T12:58:54,410 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243534315 2024-11-10T12:58:54,422 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T12:58:54,423 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T12:58:54,425 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,35149,1731243532592 2024-11-10T12:58:54,427 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,35149,1731243532592, state=OPEN 2024-11-10T12:58:54,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T12:58:54,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T12:58:54,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T12:58:54,433 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T12:58:54,433 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,35149,1731243532592 2024-11-10T12:58:54,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T12:58:54,439 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,35149,1731243532592 in 284 msec 2024-11-10T12:58:54,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T12:58:54,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-11-10T12:58:54,448 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T12:58:54,448 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T12:58:54,470 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T12:58:54,471 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,35149,1731243532592, seqNum=-1] 2024-11-10T12:58:54,493 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T12:58:54,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45617, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T12:58:54,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0110 sec 2024-11-10T12:58:54,518 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243534518, completionTime=-1 2024-11-10T12:58:54,521 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T12:58:54,521 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T12:58:54,555 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T12:58:54,555 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243594555 2024-11-10T12:58:54,555 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243654555 2024-11-10T12:58:54,555 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 34 msec 2024-11-10T12:58:54,558 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:54,559 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:54,559 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:54,561 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:43891, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T12:58:54,561 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:54,562 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T12:58:54,569 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T12:58:54,595 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.844sec 2024-11-10T12:58:54,596 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T12:58:54,598 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T12:58:54,599 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T12:58:54,599 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T12:58:54,600 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T12:58:54,601 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T12:58:54,601 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T12:58:54,612 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T12:58:54,613 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T12:58:54,613 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43891,1731243531861-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
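At this point the master reports initialization complete, and the test harness opens its own client connection (the AbstractRpcClient / ClusterIdFetcher entries that follow). Outside the mini-cluster the equivalent is a plain client connection; a minimal sketch, assuming the ZooKeeper quorum shown earlier in this log (127.0.0.1:59930), which is only reachable inside this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum taken from the ZKWatcher entries above; valid only for this test run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "59930");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
          // The test then disables the balancer ("set balanceSwitch=false" below):
          boolean previous = admin.balancerSwitch(false, true);
          System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
      }
    }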
2024-11-10T12:58:54,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T12:58:54,709 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-10T12:58:54,709 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-10T12:58:54,712 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,43891,-1 for getting cluster id 2024-11-10T12:58:54,715 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T12:58:54,724 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '83f15e7a-e9ad-4c80-b7d4-2537a9c8578f' 2024-11-10T12:58:54,728 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T12:58:54,728 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "83f15e7a-e9ad-4c80-b7d4-2537a9c8578f" 2024-11-10T12:58:54,728 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b60c6e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T12:58:54,728 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,43891,-1] 2024-11-10T12:58:54,731 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T12:58:54,733 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T12:58:54,735 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T12:58:54,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T12:58:54,739 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T12:58:54,746 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,35149,1731243532592, seqNum=-1] 2024-11-10T12:58:54,746 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T12:58:54,749 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T12:58:54,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3857ccc89b65,43891,1731243531861 2024-11-10T12:58:54,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T12:58:54,783 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T12:58:54,788 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T12:58:54,793 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3857ccc89b65,43891,1731243531861 2024-11-10T12:58:54,797 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50099097 2024-11-10T12:58:54,798 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T12:58:54,801 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T12:58:54,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T12:58:54,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
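The two warnings above fire because the test runs with a very small region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. A client-side sketch that would produce a descriptor equivalent to the create request logged next (column family 'info', VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=65536); the Admin handle is assumed from the earlier connection sketch, and whether the test sets the two sizes on the descriptor as shown here or via "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size" in its Configuration is not visible in this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      // Assumes an Admin obtained as in the earlier connection sketch.
      static void createTable(Admin admin) throws IOException {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(info)
            .setMaxFileSize(786432L)           // MAX_FILESIZE flagged by TableDescriptorChecker
            .setMemStoreFlushSize(8192L)       // MEMSTORE_FLUSHSIZE flagged by TableDescriptorChecker
            .build();
        admin.createTable(td);
      }
    }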
2024-11-10T12:58:54,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T12:58:54,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-10T12:58:54,820 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T12:58:54,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-10T12:58:54,823 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:54,825 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T12:58:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T12:58:54,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741835_1011 (size=389) 2024-11-10T12:58:54,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741835_1011 (size=389) 2024-11-10T12:58:55,279 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 20e7219996e942a171d88ffdbef532ab, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb 2024-11-10T12:58:55,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741836_1012 (size=72) 2024-11-10T12:58:55,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741836_1012 (size=72) 2024-11-10T12:58:55,292 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:55,292 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 20e7219996e942a171d88ffdbef532ab, disabling compactions & flushes 2024-11-10T12:58:55,292 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,292 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,292 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. after waiting 0 ms 2024-11-10T12:58:55,292 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,292 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,292 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 20e7219996e942a171d88ffdbef532ab: Waiting for close lock at 1731243535292Disabling compacts and flushes for region at 1731243535292Disabling writes for close at 1731243535292Writing region close event to WAL at 1731243535292Closed at 1731243535292 2024-11-10T12:58:55,294 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T12:58:55,299 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731243535294"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243535294"}]},"ts":"1731243535294"} 2024-11-10T12:58:55,305 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
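Table creation runs as a master procedure (pid=4 here), so the "Checking to see if procedure is done pid=4" entries around this point are simply the client waiting for the region below to be assigned and opened. Any other client can poll for the same condition; a minimal sketch, assuming the Admin from the earlier connection sketch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForTableSketch {
      // Polls until every region of the new table is assigned and open.
      static void waitForTable(Admin admin) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        while (!admin.isTableAvailable(name)) {
          Thread.sleep(100);   // the master is still running the CreateTableProcedure
        }
        System.out.println("table is available: " + name);
      }
    }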
2024-11-10T12:58:55,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T12:58:55,310 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243535307"}]},"ts":"1731243535307"} 2024-11-10T12:58:55,315 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-10T12:58:55,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20e7219996e942a171d88ffdbef532ab, ASSIGN}] 2024-11-10T12:58:55,319 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20e7219996e942a171d88ffdbef532ab, ASSIGN 2024-11-10T12:58:55,321 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20e7219996e942a171d88ffdbef532ab, ASSIGN; state=OFFLINE, location=3857ccc89b65,35149,1731243532592; forceNewPlan=false, retain=false 2024-11-10T12:58:55,472 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=20e7219996e942a171d88ffdbef532ab, regionState=OPENING, regionLocation=3857ccc89b65,35149,1731243532592 2024-11-10T12:58:55,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20e7219996e942a171d88ffdbef532ab, ASSIGN because future has completed 2024-11-10T12:58:55,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20e7219996e942a171d88ffdbef532ab, server=3857ccc89b65,35149,1731243532592}] 2024-11-10T12:58:55,640 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 
2024-11-10T12:58:55,641 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 20e7219996e942a171d88ffdbef532ab, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.', STARTKEY => '', ENDKEY => ''} 2024-11-10T12:58:55,641 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,641 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T12:58:55,641 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,642 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,644 INFO [StoreOpener-20e7219996e942a171d88ffdbef532ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,646 INFO [StoreOpener-20e7219996e942a171d88ffdbef532ab-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e7219996e942a171d88ffdbef532ab columnFamilyName info 2024-11-10T12:58:55,647 DEBUG [StoreOpener-20e7219996e942a171d88ffdbef532ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T12:58:55,648 INFO [StoreOpener-20e7219996e942a171d88ffdbef532ab-1 {}] regionserver.HStore(327): Store=20e7219996e942a171d88ffdbef532ab/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T12:58:55,648 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,649 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,650 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,651 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,651 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,653 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,657 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T12:58:55,657 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 20e7219996e942a171d88ffdbef532ab; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789205, jitterRate=0.0035266727209091187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T12:58:55,657 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:58:55,659 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 20e7219996e942a171d88ffdbef532ab: Running coprocessor pre-open hook at 1731243535642Writing region info on filesystem at 1731243535642Initializing all the Stores at 1731243535643 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243535643Cleaning up temporary data from old regions at 1731243535651 (+8 ms)Running coprocessor post-open hooks at 1731243535658 (+7 ms)Region opened successfully at 1731243535659 (+1 ms) 2024-11-10T12:58:55,661 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab., pid=6, masterSystemTime=1731243535633 2024-11-10T12:58:55,666 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=20e7219996e942a171d88ffdbef532ab, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,35149,1731243532592 
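With the region now OPEN (openSeqNum=2) on 3857ccc89b65,35149,1731243532592, clients resolve it through hbase:meta; the AsyncNonMetaRegionLocator entry further down shows exactly that lookup for row 'row0001'. A minimal sketch of the same lookup through the public API, assuming the Connection from the earlier sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      static void locate(Connection conn) throws IOException {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (RegionLocator locator = conn.getRegionLocator(name)) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
          // Prints the encoded region name and the hosting region server,
          // matching the "fetched location" entry later in this log.
          System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
        }
      }
    }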
2024-11-10T12:58:55,666 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,666 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T12:58:55,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20e7219996e942a171d88ffdbef532ab, server=3857ccc89b65,35149,1731243532592 because future has completed 2024-11-10T12:58:55,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T12:58:55,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 20e7219996e942a171d88ffdbef532ab, server=3857ccc89b65,35149,1731243532592 in 193 msec 2024-11-10T12:58:55,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T12:58:55,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=20e7219996e942a171d88ffdbef532ab, ASSIGN in 359 msec 2024-11-10T12:58:55,682 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T12:58:55,682 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243535682"}]},"ts":"1731243535682"} 2024-11-10T12:58:55,686 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-10T12:58:55,687 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T12:58:55,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 875 msec 2024-11-10T12:58:59,880 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-10T12:58:59,932 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T12:58:59,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-10T12:59:02,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T12:59:02,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-10T12:59:02,312 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-10T12:59:02,312 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T12:59:02,313 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T12:59:02,313 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-10T12:59:02,313 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T12:59:02,313 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-10T12:59:04,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43891 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T12:59:04,845 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-10T12:59:04,848 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-10T12:59:04,854 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-10T12:59:04,855 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 
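The flush that follows (dataSize=7.36 KB, entries=7, biggest cell ~1080 bytes, first row row0001 in family 'info') implies the test writes a handful of roughly 1 KB values between log rolls. A minimal, illustrative sketch of such writes through the client API; the exact row count, qualifier and payload the test uses are not visible in this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      static void writeRows(Connection conn) throws IOException {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        byte[] value = new byte[1024];           // ~1 KB payload, as the cell sizes above suggest
        try (Table table = conn.getTable(name)) {
          for (int i = 1; i <= 7; i++) {         // row0001 ... row0007; count inferred from entries=7
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), value); // qualifier is illustrative
            table.put(put);
          }
        }
      }
    }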
2024-11-10T12:59:04,856 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243544855 2024-11-10T12:59:04,864 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:04,864 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:04,865 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:04,865 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:04,865 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:04,865 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243533894 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243544855 2024-11-10T12:59:04,867 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T12:59:04,867 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243533894 is not closed yet, will try archiving it next time 2024-11-10T12:59:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741833_1009 (size=451) 2024-11-10T12:59:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741833_1009 (size=451) 2024-11-10T12:59:04,872 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243533894 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243533894 2024-11-10T12:59:04,876 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab., hostname=3857ccc89b65,35149,1731243532592, seqNum=2] 2024-11-10T12:59:16,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35149 {}] regionserver.HRegion(8855): Flush requested on 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:59:16,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7219996e942a171d88ffdbef532ab 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T12:59:16,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/7c660060954d4d9aaf28f06f40cb115d is 1080, key is row0001/info:/1731243544878/Put/seqid=0 2024-11-10T12:59:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741838_1014 (size=12509) 2024-11-10T12:59:16,986 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741838_1014 (size=12509) 2024-11-10T12:59:16,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/7c660060954d4d9aaf28f06f40cb115d 2024-11-10T12:59:17,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/7c660060954d4d9aaf28f06f40cb115d as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d 2024-11-10T12:59:17,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-10T12:59:17,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 143ms, sequenceid=11, compaction requested=false 2024-11-10T12:59:17,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7219996e942a171d88ffdbef532ab: 2024-11-10T12:59:20,941 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
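Each flush recorded above turns seven roughly 1 KB cells from the memstore into a temporary HFile and then commits it into the region's info store (the log shows keys such as row0001/info: and a biggest-cell length of 1080 bytes). A minimal sketch of the kind of client write loop that produces this pattern follows; the row naming, empty qualifier and 1 KB payload are inferred from the log, and the helper is an assumption, not the test's actual code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class WriteBatchSketch {
      // Hypothetical write loop: rows row0001, row0002, ... with ~1 KB values in family 'info'.
      // Seven such puts add roughly the 7.36 KB of memstore data seen in each flush above.
      static void writeRows(Connection conn, int start, int count) throws Exception {
        byte[] value = new byte[1024];
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Table table = conn.getTable(name)) {
          for (int i = start; i < start + count; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value);
            table.put(put);
          }
        }
      }
    }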
2024-11-10T12:59:24,922 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243564922 2024-11-10T12:59:25,134 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:25,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:25,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:25,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:25,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:25,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:25,135 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243544855 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243564922 2024-11-10T12:59:25,136 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T12:59:25,136 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243544855 is not closed yet, will try archiving it next time 2024-11-10T12:59:25,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741837_1013 (size=12399) 2024-11-10T12:59:25,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741837_1013 (size=12399) 2024-11-10T12:59:25,340 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:27,544 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:29,749 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:31,953 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35149 {}] regionserver.HRegion(8855): Flush requested on 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:59:31,954 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7219996e942a171d88ffdbef532ab 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T12:59:32,155 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:32,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/cf5e2437594a481ba6536a316817debb is 1080, key is row0008/info:/1731243558912/Put/seqid=0 2024-11-10T12:59:32,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741840_1016 (size=12509) 2024-11-10T12:59:32,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741840_1016 (size=12509) 2024-11-10T12:59:32,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/cf5e2437594a481ba6536a316817debb 2024-11-10T12:59:32,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/cf5e2437594a481ba6536a316817debb as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb 2024-11-10T12:59:32,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb, entries=7, sequenceid=21, filesize=12.2 K 2024-11-10T12:59:32,390 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:32,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 
437ms, sequenceid=21, compaction requested=false 2024-11-10T12:59:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7219996e942a171d88ffdbef532ab: 2024-11-10T12:59:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-10T12:59:32,391 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T12:59:32,392 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d because midkey is the same as first or last row 2024-11-10T12:59:34,157 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:34,614 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-10T12:59:34,614 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-10T12:59:36,361 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:36,363 WARN [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:36,364 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C35149%2C1731243532592:(num 1731243564922) roll requested 2024-11-10T12:59:36,365 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243576364 2024-11-10T12:59:36,573 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:36,576 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:36,576 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:36,577 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:36,577 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:36,577 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
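The WARN above requests a roll because eight consecutive syncs were merely slow (count=8 over threshold=5), while later entries in this log request rolls because a single sync exceeds a 5000 ms time threshold. The following is a simplified sketch of that two-trigger bookkeeping; the class and member names are invented for illustration and this is not the actual AbstractFSWAL code.

    final class SlowSyncRollPolicy {
      private static final long SLOW_SYNC_MS = 100;          // syncs slower than this count as "slow"
      private static final long ROLL_ON_SYNC_MS = 5000;      // a single sync this slow forces a roll ("threshold=5000 ms")
      private static final int SLOW_SYNC_ROLL_THRESHOLD = 5; // matches "threshold=5" above
      private int slowSyncCount;

      // Returns true when a log roll should be requested after a sync that took tookMs.
      boolean onSyncCompleted(long tookMs) {
        if (tookMs >= ROLL_ON_SYNC_MS) {
          return true;                                       // e.g. "Slow sync cost: 5005 ms"
        }
        if (tookMs >= SLOW_SYNC_MS) {
          slowSyncCount++;                                   // e.g. "Slow sync cost: 201 ms"
        }
        return slowSyncCount > SLOW_SYNC_ROLL_THRESHOLD;     // e.g. "count=8, threshold=5"
      }

      // The counter starts over once the WAL has actually rolled.
      void onWalRolled() {
        slowSyncCount = 0;
      }
    }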
2024-11-10T12:59:36,577 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243564922 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243576364 2024-11-10T12:59:36,578 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T12:59:36,578 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243564922 is not closed yet, will try archiving it next time 2024-11-10T12:59:36,578 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243544855 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243544855 2024-11-10T12:59:36,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741839_1015 (size=7739) 2024-11-10T12:59:36,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741839_1015 (size=7739) 2024-11-10T12:59:38,565 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:40,641 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 20e7219996e942a171d88ffdbef532ab, had cached 0 bytes from a total of 25018 2024-11-10T12:59:40,770 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:42,974 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:45,178 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:47,180 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T12:59:47,180 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243587180 2024-11-10T12:59:50,941 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T12:59:52,189 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:52,191 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:52,191 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C35149%2C1731243532592:(num 1731243587180) roll requested 2024-11-10T12:59:52,191 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:52,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:52,192 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:52,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:52,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T12:59:52,192 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243576364 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243587180 2024-11-10T12:59:52,193 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T12:59:52,193 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243576364 is not closed yet, will try archiving it next time 2024-11-10T12:59:52,194 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243592193 2024-11-10T12:59:52,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741841_1017 (size=4753) 2024-11-10T12:59:52,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741841_1017 (size=4753) 2024-11-10T12:59:57,197 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:57,197 WARN [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35149 {}] regionserver.HRegion(8855): Flush requested on 20e7219996e942a171d88ffdbef532ab 2024-11-10T12:59:57,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7219996e942a171d88ffdbef532ab 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T12:59:57,205 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:57,205 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T12:59:59,198 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:00:02,199 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T13:00:02,200 WARN [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK]] 2024-11-10T13:00:02,200 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:02,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:02,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:02,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:02,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:02,201 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243587180 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243592193 2024-11-10T13:00:02,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741842_1018 (size=1569) 2024-11-10T13:00:02,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741842_1018 (size=1569) 2024-11-10T13:00:02,205 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36027:36027),(127.0.0.1/127.0.0.1:36941:36941)] 2024-11-10T13:00:02,205 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243587180 is not closed yet, will try archiving it next time 2024-11-10T13:00:02,205 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C35149%2C1731243532592:(num 1731243592193) roll requested 2024-11-10T13:00:02,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/dc53ab18a81144ad8d096603099fa580 is 1080, key is row0015/info:/1731243573956/Put/seqid=0 2024-11-10T13:00:02,206 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243602205 2024-11-10T13:00:02,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741844_1020 (size=12509) 2024-11-10T13:00:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741844_1020 (size=12509) 2024-11-10T13:00:02,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/dc53ab18a81144ad8d096603099fa580 2024-11-10T13:00:02,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/dc53ab18a81144ad8d096603099fa580 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580 2024-11-10T13:00:02,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580, entries=7, sequenceid=31, filesize=12.2 K 2024-11-10T13:00:07,213 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK], DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK]] 2024-11-10T13:00:07,213 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK], DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK]] 2024-11-10T13:00:07,236 INFO [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK], DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK]] 2024-11-10T13:00:07,236 WARN [FSHLog-0-hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb-prefix:3857ccc89b65,35149,1731243532592 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36393,DS-679bbf95-e96e-4cc6-b4af-c77cd6f1aa5f,DISK], DatanodeInfoWithStorage[127.0.0.1:42169,DS-5f9023b2-2fd3-4ade-a1ed-5d918c1e18d6,DISK]] 2024-11-10T13:00:07,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 10039ms, sequenceid=31, compaction requested=true 2024-11-10T13:00:07,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7219996e942a171d88ffdbef532ab: 2024-11-10T13:00:07,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,237 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-10T13:00:07,237 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:07,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,237 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d because midkey is the same as first or last row 2024-11-10T13:00:07,237 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,237 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243592193 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243602205 2024-11-10T13:00:07,238 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T13:00:07,239 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243592193 is not closed yet, will try archiving it next time 2024-11-10T13:00:07,239 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243564922 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243564922 2024-11-10T13:00:07,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e7219996e942a171d88ffdbef532ab:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:00:07,239 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C35149%2C1731243532592:(num 1731243607239) roll requested 2024-11-10T13:00:07,239 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243607239 2024-11-10T13:00:07,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741843_1019 (size=438) 2024-11-10T13:00:07,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741843_1019 (size=438) 2024-11-10T13:00:07,242 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:00:07,242 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243576364 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243576364 2024-11-10T13:00:07,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:07,244 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243587180 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243587180 2024-11-10T13:00:07,245 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:00:07,246 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243592193 to 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243592193 2024-11-10T13:00:07,247 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HStore(1541): 20e7219996e942a171d88ffdbef532ab/info is initiating minor compaction (all files) 2024-11-10T13:00:07,247 INFO [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 20e7219996e942a171d88ffdbef532ab/info in TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T13:00:07,248 INFO [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580] into tmpdir=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp, totalSize=36.6 K 2024-11-10T13:00:07,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,248 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,249 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243602205 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243607239 2024-11-10T13:00:07,249 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c660060954d4d9aaf28f06f40cb115d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731243544878 2024-11-10T13:00:07,250 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf5e2437594a481ba6536a316817debb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731243558912 2024-11-10T13:00:07,251 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] compactions.Compactor(225): Compacting dc53ab18a81144ad8d096603099fa580, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731243573956 2024-11-10T13:00:07,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741845_1021 (size=93) 2024-11-10T13:00:07,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741845_1021 (size=93) 2024-11-10T13:00:07,252 INFO [WAL-Archive-0 {}] 
wal.AbstractFSWAL(968): Archiving hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243602205 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs/3857ccc89b65%2C35149%2C1731243532592.1731243602205 2024-11-10T13:00:07,253 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:36027:36027)] 2024-11-10T13:00:07,253 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C35149%2C1731243532592.1731243607253 2024-11-10T13:00:07,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:07,260 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243607239 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/WALs/3857ccc89b65,35149,1731243532592/3857ccc89b65%2C35149%2C1731243532592.1731243607253 2024-11-10T13:00:07,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741846_1022 (size=1258) 2024-11-10T13:00:07,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741846_1022 (size=1258) 2024-11-10T13:00:07,265 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36027:36027),(127.0.0.1/127.0.0.1:36941:36941)] 2024-11-10T13:00:07,281 INFO [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e7219996e942a171d88ffdbef532ab#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:00:07,282 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/25e5342a014a4742aff766341da436e6 is 1080, key is row0001/info:/1731243544878/Put/seqid=0 2024-11-10T13:00:07,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741848_1024 (size=27710) 2024-11-10T13:00:07,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741848_1024 (size=27710) 2024-11-10T13:00:07,297 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/25e5342a014a4742aff766341da436e6 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/25e5342a014a4742aff766341da436e6 2024-11-10T13:00:07,313 INFO [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 20e7219996e942a171d88ffdbef532ab/info of 20e7219996e942a171d88ffdbef532ab into 25e5342a014a4742aff766341da436e6(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:00:07,313 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 20e7219996e942a171d88ffdbef532ab: 2024-11-10T13:00:07,315 INFO [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab., storeName=20e7219996e942a171d88ffdbef532ab/info, priority=13, startTime=1731243607238; duration=0sec 2024-11-10T13:00:07,315 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T13:00:07,315 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:07,315 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/25e5342a014a4742aff766341da436e6 because midkey is the same as first or last row 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/25e5342a014a4742aff766341da436e6 because midkey is the same as first or last row 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/25e5342a014a4742aff766341da436e6 because midkey is the same as first or last row 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:07,316 DEBUG [RS:0;3857ccc89b65:35149-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e7219996e942a171d88ffdbef532ab:info 2024-11-10T13:00:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35149 {}] regionserver.HRegion(8855): Flush requested on 20e7219996e942a171d88ffdbef532ab 2024-11-10T13:00:19,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7219996e942a171d88ffdbef532ab 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T13:00:19,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/066a3dd5f6c641dba509e4d2bd41ad84 is 1080, key is row0022/info:/1731243607255/Put/seqid=0 2024-11-10T13:00:19,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741849_1025 (size=12509) 2024-11-10T13:00:19,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741849_1025 (size=12509) 2024-11-10T13:00:19,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/066a3dd5f6c641dba509e4d2bd41ad84 2024-11-10T13:00:19,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/066a3dd5f6c641dba509e4d2bd41ad84 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/066a3dd5f6c641dba509e4d2bd41ad84 2024-11-10T13:00:19,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/066a3dd5f6c641dba509e4d2bd41ad84, entries=7, sequenceid=42, filesize=12.2 K 2024-11-10T13:00:19,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 33ms, sequenceid=42, compaction requested=false 2024-11-10T13:00:19,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7219996e942a171d88ffdbef532ab: 2024-11-10T13:00:19,310 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-10T13:00:19,310 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:19,311 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/25e5342a014a4742aff766341da436e6 because midkey is the same as first or last row 2024-11-10T13:00:20,941 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T13:00:25,642 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 20e7219996e942a171d88ffdbef532ab, had cached 0 bytes from a total of 40219 2024-11-10T13:00:27,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:00:27,289 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
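Just before the shutdown above begins, and repeatedly earlier in the run, the split policy decides the region is large enough to split (sumSize of 24.4 K, 36.6 K and finally 39.3 K against sizeToCheck=16.0 K) but then declines because the candidate store file's midkey equals its first or last row, so there is no usable split point. A simplified sketch of that two-step decision follows; the names are invented and this is not the real ConstantSizeRegionSplitPolicy or StoreUtils code.

    import java.util.Arrays;
    import java.util.Optional;

    final class SplitDecisionSketch {
      // Step 1: mirrors "Should split because region size is big enough sumSize=..., sizeToCheck=...".
      static boolean sizeBigEnough(long sumStoreFileSizeBytes, long sizeToCheckBytes) {
        return sumStoreFileSizeBytes > sizeToCheckBytes;
      }

      // Step 2: mirrors "cannot split ... because midkey is the same as first or last row".
      // A midkey equal to either end would leave one daughter region empty.
      static Optional<byte[]> splitPoint(byte[] firstKey, byte[] midKey, byte[] lastKey) {
        if (midKey == null || Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
          return Optional.empty();
        }
        return Optional.of(midKey);
      }
    }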
2024-11-10T13:00:27,289 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:27,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:27,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:27,294 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
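The call stack above comes from AbstractTestLogRolling.tearDown shutting the mini cluster down via HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection first. A minimal sketch of that kind of JUnit 4 tearDown is shown below; the TEST_UTIL field name is an assumption, since only the shutdownMiniCluster call is visible in the trace.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      // Assumed shared test utility; the log only proves that tearDown ends up in
      // HBaseTestingUtil.shutdownMiniCluster (HBaseTestingUtil.java:1020 in the trace).
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection, stops the region server(s) and the master,
        // and finally tears down the mini DFS cluster.
        TEST_UTIL.shutdownMiniCluster();
      }
    }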
2024-11-10T13:00:27,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:00:27,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=655794698, stopped=false 2024-11-10T13:00:27,295 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,43891,1731243531861 2024-11-10T13:00:27,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:27,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:27,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:27,297 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:00:27,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:27,297 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:00:27,298 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:27,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:27,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:27,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:27,298 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,35149,1731243532592' ***** 2024-11-10T13:00:27,298 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:00:27,298 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:00:27,299 INFO [RS:0;3857ccc89b65:35149 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:00:27,299 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:00:27,299 INFO [RS:0;3857ccc89b65:35149 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:00:27,299 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(3091): Received CLOSE for 20e7219996e942a171d88ffdbef532ab 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,35149,1731243532592 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:35149. 
2024-11-10T13:00:27,300 DEBUG [RS:0;3857ccc89b65:35149 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:27,300 DEBUG [RS:0;3857ccc89b65:35149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:27,300 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 20e7219996e942a171d88ffdbef532ab, disabling compactions & flushes 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:00:27,300 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:00:27,300 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:00:27,300 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. after waiting 0 ms 2024-11-10T13:00:27,300 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 
2024-11-10T13:00:27,300 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:00:27,301 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 20e7219996e942a171d88ffdbef532ab 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-10T13:00:27,301 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T13:00:27,301 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1325): Online Regions={20e7219996e942a171d88ffdbef532ab=TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab., 1588230740=hbase:meta,,1.1588230740} 2024-11-10T13:00:27,301 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:00:27,301 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:00:27,301 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:00:27,301 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:00:27,301 DEBUG [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 20e7219996e942a171d88ffdbef532ab 2024-11-10T13:00:27,301 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:00:27,301 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-10T13:00:27,306 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/f9a827c45cc24d8e8db3326aca7819d7 is 1080, key is row0029/info:/1731243621280/Put/seqid=0 2024-11-10T13:00:27,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741850_1026 (size=8193) 2024-11-10T13:00:27,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741850_1026 (size=8193) 2024-11-10T13:00:27,315 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/f9a827c45cc24d8e8db3326aca7819d7 2024-11-10T13:00:27,323 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/info/50b2d35c5cf14911b0f75fcd1f5ebd09 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab./info:regioninfo/1731243535666/Put/seqid=0 2024-11-10T13:00:27,323 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/.tmp/info/f9a827c45cc24d8e8db3326aca7819d7 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/f9a827c45cc24d8e8db3326aca7819d7 2024-11-10T13:00:27,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741851_1027 (size=7016) 2024-11-10T13:00:27,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741851_1027 (size=7016) 2024-11-10T13:00:27,331 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/info/50b2d35c5cf14911b0f75fcd1f5ebd09 2024-11-10T13:00:27,332 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/f9a827c45cc24d8e8db3326aca7819d7, entries=3, sequenceid=48, filesize=8.0 K 2024-11-10T13:00:27,333 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 33ms, sequenceid=48, compaction requested=true 2024-11-10T13:00:27,334 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580] to archive 2024-11-10T13:00:27,337 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-10T13:00:27,340 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/7c660060954d4d9aaf28f06f40cb115d 2024-11-10T13:00:27,342 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/cf5e2437594a481ba6536a316817debb 2024-11-10T13:00:27,344 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580 to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/archive/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/info/dc53ab18a81144ad8d096603099fa580 2024-11-10T13:00:27,360 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/ns/7033d561346740ab9513bd51f5b75bc8 is 43, key is default/ns:d/1731243534499/Put/seqid=0 2024-11-10T13:00:27,359 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3857ccc89b65:43891 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-10T13:00:27,364 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7c660060954d4d9aaf28f06f40cb115d=12509, cf5e2437594a481ba6536a316817debb=12509, dc53ab18a81144ad8d096603099fa580=12509] 2024-11-10T13:00:27,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741852_1028 (size=5153) 2024-11-10T13:00:27,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741852_1028 (size=5153) 2024-11-10T13:00:27,367 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/ns/7033d561346740ab9513bd51f5b75bc8 2024-11-10T13:00:27,370 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/default/TestLogRolling-testSlowSyncLogRolling/20e7219996e942a171d88ffdbef532ab/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-10T13:00:27,372 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 2024-11-10T13:00:27,373 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 20e7219996e942a171d88ffdbef532ab: Waiting for close lock at 1731243627300Running coprocessor pre-close hooks at 1731243627300Disabling compacts and flushes for region at 1731243627300Disabling writes for close at 1731243627300Obtaining lock to block concurrent updates at 1731243627301 (+1 ms)Preparing flush snapshotting stores in 20e7219996e942a171d88ffdbef532ab at 1731243627301Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731243627301Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. at 1731243627302 (+1 ms)Flushing 20e7219996e942a171d88ffdbef532ab/info: creating writer at 1731243627302Flushing 20e7219996e942a171d88ffdbef532ab/info: appending metadata at 1731243627306 (+4 ms)Flushing 20e7219996e942a171d88ffdbef532ab/info: closing flushed file at 1731243627306Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cb87630: reopening flushed file at 1731243627322 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 20e7219996e942a171d88ffdbef532ab in 33ms, sequenceid=48, compaction requested=true at 1731243627333 (+11 ms)Writing region close event to WAL at 1731243627365 (+32 ms)Running coprocessor post-close hooks at 1731243627371 (+6 ms)Closed at 1731243627372 (+1 ms) 2024-11-10T13:00:27,373 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731243534803.20e7219996e942a171d88ffdbef532ab. 
2024-11-10T13:00:27,390 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/table/8615251e40d9427d95c753f2a75e672c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731243535682/Put/seqid=0 2024-11-10T13:00:27,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741853_1029 (size=5396) 2024-11-10T13:00:27,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741853_1029 (size=5396) 2024-11-10T13:00:27,397 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/table/8615251e40d9427d95c753f2a75e672c 2024-11-10T13:00:27,404 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/info/50b2d35c5cf14911b0f75fcd1f5ebd09 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/info/50b2d35c5cf14911b0f75fcd1f5ebd09 2024-11-10T13:00:27,411 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/info/50b2d35c5cf14911b0f75fcd1f5ebd09, entries=10, sequenceid=11, filesize=6.9 K 2024-11-10T13:00:27,412 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/ns/7033d561346740ab9513bd51f5b75bc8 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/ns/7033d561346740ab9513bd51f5b75bc8 2024-11-10T13:00:27,420 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/ns/7033d561346740ab9513bd51f5b75bc8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:00:27,421 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/.tmp/table/8615251e40d9427d95c753f2a75e672c as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/table/8615251e40d9427d95c753f2a75e672c 2024-11-10T13:00:27,428 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/table/8615251e40d9427d95c753f2a75e672c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T13:00:27,429 INFO 
[RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false 2024-11-10T13:00:27,435 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:00:27,436 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:00:27,436 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:27,436 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243627301Running coprocessor pre-close hooks at 1731243627301Disabling compacts and flushes for region at 1731243627301Disabling writes for close at 1731243627301Obtaining lock to block concurrent updates at 1731243627301Preparing flush snapshotting stores in 1588230740 at 1731243627301Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731243627302 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731243627303 (+1 ms)Flushing 1588230740/info: creating writer at 1731243627303Flushing 1588230740/info: appending metadata at 1731243627322 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731243627322Flushing 1588230740/ns: creating writer at 1731243627338 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731243627360 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731243627360Flushing 1588230740/table: creating writer at 1731243627375 (+15 ms)Flushing 1588230740/table: appending metadata at 1731243627390 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731243627390Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@595df504: reopening flushed file at 1731243627403 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cacfc8b: reopening flushed file at 1731243627411 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69559895: reopening flushed file at 1731243627420 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false at 1731243627429 (+9 ms)Writing region close event to WAL at 1731243627431 (+2 ms)Running coprocessor post-close hooks at 1731243627436 (+5 ms)Closed at 1731243627436 2024-11-10T13:00:27,436 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:27,501 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,35149,1731243532592; all regions closed. 
2024-11-10T13:00:27,503 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,503 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,503 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,504 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,504 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741834_1010 (size=3066) 2024-11-10T13:00:27,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741834_1010 (size=3066) 2024-11-10T13:00:27,510 DEBUG [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs 2024-11-10T13:00:27,510 INFO [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C35149%2C1731243532592.meta:.meta(num 1731243534349) 2024-11-10T13:00:27,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,511 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741847_1023 (size=12695) 2024-11-10T13:00:27,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741847_1023 (size=12695) 2024-11-10T13:00:27,517 DEBUG [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/oldWALs 2024-11-10T13:00:27,517 INFO [RS:0;3857ccc89b65:35149 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C35149%2C1731243532592:(num 1731243607253) 2024-11-10T13:00:27,517 DEBUG [RS:0;3857ccc89b65:35149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:27,517 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:00:27,517 INFO [RS:0;3857ccc89b65:35149 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:00:27,517 INFO [RS:0;3857ccc89b65:35149 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T13:00:27,518 INFO [RS:0;3857ccc89b65:35149 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:00:27,518 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:00:27,518 INFO [RS:0;3857ccc89b65:35149 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35149 2024-11-10T13:00:27,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,35149,1731243532592 2024-11-10T13:00:27,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:00:27,522 INFO [RS:0;3857ccc89b65:35149 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:00:27,523 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,35149,1731243532592] 2024-11-10T13:00:27,526 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,35149,1731243532592 already deleted, retry=false 2024-11-10T13:00:27,526 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,35149,1731243532592 expired; onlineServers=0 2024-11-10T13:00:27,526 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,43891,1731243531861' ***** 2024-11-10T13:00:27,526 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:00:27,527 INFO [M:0;3857ccc89b65:43891 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:00:27,527 INFO [M:0;3857ccc89b65:43891 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:00:27,527 DEBUG [M:0;3857ccc89b65:43891 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:00:27,527 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
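The "RegionServer ephemeral node deleted, processing expiration" entry above shows how region-server liveness is tracked: each server registers an ephemeral znode under /hbase/rs, and when the server closes its ZooKeeper session during shutdown the node disappears and the master's RegionServerTracker reacts. A minimal sketch of creating such an ephemeral node with the plain ZooKeeper client, assuming an illustrative znode path and payload rather than HBase's actual registration code:

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void register(ZooKeeper zk, String serverName)
      throws KeeperException, InterruptedException {
    // The node lives only as long as this session; closing the session (as the
    // region server does at shutdown) deletes it and fires the watcher events
    // seen in this log.
    zk.create("/hbase/rs/" + serverName, new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
  }
}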
2024-11-10T13:00:27,527 DEBUG [M:0;3857ccc89b65:43891 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:00:27,527 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243533607 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243533607,5,FailOnTimeoutGroup] 2024-11-10T13:00:27,527 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243533609 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243533609,5,FailOnTimeoutGroup] 2024-11-10T13:00:27,527 INFO [M:0;3857ccc89b65:43891 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:00:27,527 INFO [M:0;3857ccc89b65:43891 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:00:27,527 DEBUG [M:0;3857ccc89b65:43891 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:00:27,527 INFO [M:0;3857ccc89b65:43891 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:00:27,528 INFO [M:0;3857ccc89b65:43891 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:00:27,528 INFO [M:0;3857ccc89b65:43891 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:00:27,528 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:00:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:00:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:27,529 DEBUG [M:0;3857ccc89b65:43891 {}] zookeeper.ZKUtil(347): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:00:27,529 WARN [M:0;3857ccc89b65:43891 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:00:27,530 INFO [M:0;3857ccc89b65:43891 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/.lastflushedseqids 2024-11-10T13:00:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741854_1030 (size=130) 2024-11-10T13:00:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741854_1030 (size=130) 2024-11-10T13:00:27,542 INFO [M:0;3857ccc89b65:43891 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:00:27,542 INFO [M:0;3857ccc89b65:43891 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:00:27,543 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:00:27,543 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:27,543 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:27,543 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:00:27,543 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:27,543 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-10T13:00:27,561 DEBUG [M:0;3857ccc89b65:43891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec9257bca69e4e938ffacd77048b3d1e is 82, key is hbase:meta,,1/info:regioninfo/1731243534424/Put/seqid=0 2024-11-10T13:00:27,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741855_1031 (size=5672) 2024-11-10T13:00:27,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741855_1031 (size=5672) 2024-11-10T13:00:27,567 INFO [M:0;3857ccc89b65:43891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec9257bca69e4e938ffacd77048b3d1e 2024-11-10T13:00:27,590 DEBUG [M:0;3857ccc89b65:43891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a99682ee9c66440781b36c0ce6443bdf is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731243535689/Put/seqid=0 2024-11-10T13:00:27,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741856_1032 (size=6248) 2024-11-10T13:00:27,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741856_1032 (size=6248) 2024-11-10T13:00:27,596 INFO [M:0;3857ccc89b65:43891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a99682ee9c66440781b36c0ce6443bdf 2024-11-10T13:00:27,602 INFO [M:0;3857ccc89b65:43891 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a99682ee9c66440781b36c0ce6443bdf 2024-11-10T13:00:27,618 DEBUG [M:0;3857ccc89b65:43891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ade98d3d1d644f3a12fbf1a7580a167 is 69, key is 3857ccc89b65,35149,1731243532592/rs:state/1731243533646/Put/seqid=0 2024-11-10T13:00:27,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741857_1033 (size=5156) 2024-11-10T13:00:27,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741857_1033 (size=5156) 2024-11-10T13:00:27,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:27,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35149-0x10101f5c5890001, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:27,625 INFO [RS:0;3857ccc89b65:35149 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:00:27,625 INFO [M:0;3857ccc89b65:43891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ade98d3d1d644f3a12fbf1a7580a167 2024-11-10T13:00:27,626 INFO [RS:0;3857ccc89b65:35149 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,35149,1731243532592; zookeeper connection closed. 2024-11-10T13:00:27,626 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@276f0be2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@276f0be2 2024-11-10T13:00:27,626 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:00:27,650 DEBUG [M:0;3857ccc89b65:43891 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b025ec59f6d40d58191c0a0ddc7e8fa is 52, key is load_balancer_on/state:d/1731243534778/Put/seqid=0 2024-11-10T13:00:27,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741858_1034 (size=5056) 2024-11-10T13:00:27,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741858_1034 (size=5056) 2024-11-10T13:00:27,657 INFO [M:0;3857ccc89b65:43891 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b025ec59f6d40d58191c0a0ddc7e8fa 2024-11-10T13:00:27,666 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec9257bca69e4e938ffacd77048b3d1e as 
hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec9257bca69e4e938ffacd77048b3d1e 2024-11-10T13:00:27,673 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec9257bca69e4e938ffacd77048b3d1e, entries=8, sequenceid=59, filesize=5.5 K 2024-11-10T13:00:27,674 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a99682ee9c66440781b36c0ce6443bdf as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a99682ee9c66440781b36c0ce6443bdf 2024-11-10T13:00:27,682 INFO [M:0;3857ccc89b65:43891 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a99682ee9c66440781b36c0ce6443bdf 2024-11-10T13:00:27,682 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a99682ee9c66440781b36c0ce6443bdf, entries=6, sequenceid=59, filesize=6.1 K 2024-11-10T13:00:27,683 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7ade98d3d1d644f3a12fbf1a7580a167 as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ade98d3d1d644f3a12fbf1a7580a167 2024-11-10T13:00:27,690 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7ade98d3d1d644f3a12fbf1a7580a167, entries=1, sequenceid=59, filesize=5.0 K 2024-11-10T13:00:27,691 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0b025ec59f6d40d58191c0a0ddc7e8fa as hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b025ec59f6d40d58191c0a0ddc7e8fa 2024-11-10T13:00:27,698 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0b025ec59f6d40d58191c0a0ddc7e8fa, entries=1, sequenceid=59, filesize=4.9 K 2024-11-10T13:00:27,699 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=59, compaction requested=false 2024-11-10T13:00:27,701 INFO [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
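The repeated "Committing <tmp file> as <store file>" entries in this shutdown sequence reflect the flush pattern of writing each HFile under the region's .tmp directory and then moving it into the store directory once it is complete. A minimal sketch of that write-then-commit step using the plain HDFS FileSystem API, with illustrative names rather than HBase's actual HRegionFileSystem code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  // Moves a fully written file from the region's .tmp directory into its store
  // directory, so readers only ever observe complete files.
  public static void commit(Configuration conf, Path tmpFile, Path storeFile) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}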
2024-11-10T13:00:27,701 DEBUG [M:0;3857ccc89b65:43891 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243627542Disabling compacts and flushes for region at 1731243627542Disabling writes for close at 1731243627543 (+1 ms)Obtaining lock to block concurrent updates at 1731243627543Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243627543Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731243627543Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731243627544 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243627544Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243627560 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243627560Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243627574 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243627589 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243627589Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243627602 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243627617 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243627617Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243627632 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243627649 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243627649Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69dd38f8: reopening flushed file at 1731243627665 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2215ab7c: reopening flushed file at 1731243627673 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7213efb5: reopening flushed file at 1731243627682 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14b2c4ff: reopening flushed file at 1731243627690 (+8 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=59, compaction requested=false at 1731243627699 (+9 ms)Writing region close event to WAL at 1731243627701 (+2 ms)Closed at 1731243627701 2024-11-10T13:00:27,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,702 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,702 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,702 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:27,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741830_1006 (size=27985) 2024-11-10T13:00:27,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36393 is added to blk_1073741830_1006 (size=27985) 2024-11-10T13:00:27,706 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:00:27,706 INFO [M:0;3857ccc89b65:43891 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:00:27,706 INFO [M:0;3857ccc89b65:43891 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43891 2024-11-10T13:00:27,706 INFO [M:0;3857ccc89b65:43891 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:00:27,739 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:00:27,809 INFO [M:0;3857ccc89b65:43891 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:00:27,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:27,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43891-0x10101f5c5890000, quorum=127.0.0.1:59930, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:27,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:27,818 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:27,819 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:27,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:27,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:27,822 WARN [BP-1773734076-172.17.0.2-1731243528935 heartbeating to localhost/127.0.0.1:44081 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:27,822 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:00:27,823 WARN [BP-1773734076-172.17.0.2-1731243528935 heartbeating to localhost/127.0.0.1:44081 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1773734076-172.17.0.2-1731243528935 (Datanode Uuid 56e70fc3-2b8b-43a9-b7be-a39851e441ff) service to localhost/127.0.0.1:44081 2024-11-10T13:00:27,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:27,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data3/current/BP-1773734076-172.17.0.2-1731243528935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:27,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data4/current/BP-1773734076-172.17.0.2-1731243528935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:27,825 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:27,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:27,828 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:27,828 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:27,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:27,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:27,829 WARN [BP-1773734076-172.17.0.2-1731243528935 heartbeating to localhost/127.0.0.1:44081 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:27,829 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:00:27,829 WARN [BP-1773734076-172.17.0.2-1731243528935 heartbeating to localhost/127.0.0.1:44081 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1773734076-172.17.0.2-1731243528935 (Datanode Uuid 1b9dd577-f21d-4c0f-8c84-741814eaa716) service to localhost/127.0.0.1:44081 2024-11-10T13:00:27,830 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:27,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data1/current/BP-1773734076-172.17.0.2-1731243528935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:27,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/cluster_27ca9261-1007-ab21-93a9-a5d09ba32aac/data/data2/current/BP-1773734076-172.17.0.2-1731243528935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:27,831 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:27,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:00:27,841 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:27,841 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:27,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:27,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:27,851 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:00:27,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:00:27,905 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44081 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3857ccc89b65:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@77f944e5 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44081 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44081 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44081 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44081 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/3857ccc89b65:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/3857ccc89b65:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44081 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44081 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44081 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=67 (was 2) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=9112 (was 9661)
2024-11-10T13:00:27,911 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=67, ProcessCount=11, AvailableMemoryMB=9112
2024-11-10T13:00:27,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-10T13:00:27,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.log.dir so I do NOT create it in target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f
2024-11-10T13:00:27,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1b94522-9f3b-75da-fc9c-29a541d50b3a/hadoop.tmp.dir so I do NOT create it in target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f
2024-11-10T13:00:27,912 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26, deleteOnExit=true
2024-11-10T13:00:27,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/test.cache.data in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.tmp.dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-10T13:00:27,913 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-10T13:00:27,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/nfs.dump.dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/java.io.tmpdir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-10T13:00:27,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-10T13:00:27,929 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-10T13:00:28,002 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T13:00:28,007 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T13:00:28,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T13:00:28,009 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T13:00:28,009 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-10T13:00:28,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T13:00:28,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,AVAILABLE}
2024-11-10T13:00:28,010 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T13:00:28,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1edca743{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/java.io.tmpdir/jetty-localhost-44687-hadoop-hdfs-3_4_1-tests_jar-_-any-10498694249396189104/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T13:00:28,129 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:44687}
2024-11-10T13:00:28,129 INFO [Time-limited test {}] server.Server(415): Started @101100ms
2024-11-10T13:00:28,148 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-10T13:00:28,222 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T13:00:28,227 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T13:00:28,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T13:00:28,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T13:00:28,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-10T13:00:28,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,AVAILABLE}
2024-11-10T13:00:28,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T13:00:28,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a15ed6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/java.io.tmpdir/jetty-localhost-45689-hadoop-hdfs-3_4_1-tests_jar-_-any-3584308615680926976/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T13:00:28,344 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:45689}
2024-11-10T13:00:28,344 INFO [Time-limited test {}] server.Server(415): Started @101315ms
2024-11-10T13:00:28,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T13:00:28,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T13:00:28,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T13:00:28,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T13:00:28,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T13:00:28,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T13:00:28,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,AVAILABLE}
2024-11-10T13:00:28,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T13:00:28,459 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data1/current/BP-484755594-172.17.0.2-1731243627948/current, will proceed with Du for space computation calculation,
2024-11-10T13:00:28,459 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data2/current/BP-484755594-172.17.0.2-1731243627948/current, will proceed with Du for space computation calculation,
2024-11-10T13:00:28,483 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-10T13:00:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb284f07fff21ccbd with lease ID 0x93829b97e450e087: Processing first storage report for DS-416e6e6f-45af-4d10-aa32-0842a1139972 from datanode DatanodeRegistration(127.0.0.1:40121, datanodeUuid=b6175fe2-3a73-4ed5-b908-b5291f10ad62, infoPort=44775, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948) 2024-11-10T13:00:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb284f07fff21ccbd with lease ID 0x93829b97e450e087: from storage DS-416e6e6f-45af-4d10-aa32-0842a1139972 node DatanodeRegistration(127.0.0.1:40121, datanodeUuid=b6175fe2-3a73-4ed5-b908-b5291f10ad62, infoPort=44775, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb284f07fff21ccbd with lease ID 0x93829b97e450e087: Processing first storage report for DS-450fedc2-c01b-4e53-8c42-68100cef176a from datanode DatanodeRegistration(127.0.0.1:40121, datanodeUuid=b6175fe2-3a73-4ed5-b908-b5291f10ad62, infoPort=44775, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948) 2024-11-10T13:00:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb284f07fff21ccbd with lease ID 0x93829b97e450e087: from storage DS-450fedc2-c01b-4e53-8c42-68100cef176a node DatanodeRegistration(127.0.0.1:40121, datanodeUuid=b6175fe2-3a73-4ed5-b908-b5291f10ad62, infoPort=44775, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:28,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@18492d7d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/java.io.tmpdir/jetty-localhost-41799-hadoop-hdfs-3_4_1-tests_jar-_-any-222192546121595518/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:28,523 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:41799} 2024-11-10T13:00:28,523 INFO [Time-limited test {}] server.Server(415): Started @101494ms 2024-11-10T13:00:28,526 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
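The datanode registrations and first block reports above are the tail end of HBaseTestingUtil bringing up an in-process HDFS before the HBase mini cluster itself. A minimal sketch of the kind of test setup that produces this sequence, using the HBaseTestingUtil class named in the log (method names follow the usual HBase test-utility API and may differ slightly between releases):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSetupSketch {
      // One shared utility per test class, as HBase tests conventionally do.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      public static void main(String[] args) throws Exception {
        // Starts in-process HDFS (NameNode + DataNodes), ZooKeeper and HBase,
        // writing everything under target/test-data/<random dir> as logged above.
        TEST_UTIL.startMiniCluster();
        try {
          // ... exercise the cluster, e.g. via TEST_UTIL.getConnection() ...
        } finally {
          TEST_UTIL.shutdownMiniCluster();
        }
      }
    }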
2024-11-10T13:00:28,638 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data3/current/BP-484755594-172.17.0.2-1731243627948/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:28,638 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data4/current/BP-484755594-172.17.0.2-1731243627948/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:28,656 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:00:28,658 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9530fb55288530 with lease ID 0x93829b97e450e088: Processing first storage report for DS-15e98b24-7383-49aa-abf7-ecd492a25a30 from datanode DatanodeRegistration(127.0.0.1:46083, datanodeUuid=1e729ff2-46fc-4247-bb38-497158fe93bf, infoPort=41755, infoSecurePort=0, ipcPort=37829, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948) 2024-11-10T13:00:28,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9530fb55288530 with lease ID 0x93829b97e450e088: from storage DS-15e98b24-7383-49aa-abf7-ecd492a25a30 node DatanodeRegistration(127.0.0.1:46083, datanodeUuid=1e729ff2-46fc-4247-bb38-497158fe93bf, infoPort=41755, infoSecurePort=0, ipcPort=37829, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:00:28,659 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf9530fb55288530 with lease ID 0x93829b97e450e088: Processing first storage report for DS-304c38d9-d723-449e-a843-4ae8aecb7a8d from datanode DatanodeRegistration(127.0.0.1:46083, datanodeUuid=1e729ff2-46fc-4247-bb38-497158fe93bf, infoPort=41755, infoSecurePort=0, ipcPort=37829, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948) 2024-11-10T13:00:28,659 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf9530fb55288530 with lease ID 0x93829b97e450e088: from storage DS-304c38d9-d723-449e-a843-4ae8aecb7a8d node DatanodeRegistration(127.0.0.1:46083, datanodeUuid=1e729ff2-46fc-4247-bb38-497158fe93bf, infoPort=41755, infoSecurePort=0, ipcPort=37829, storageInfo=lv=-57;cid=testClusterID;nsid=1262754640;c=1731243627948), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:28,756 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f 2024-11-10T13:00:28,759 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/zookeeper_0, clientPort=57375, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:00:28,760 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57375 2024-11-10T13:00:28,760 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:00:28,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:00:28,775 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1 with version=8 2024-11-10T13:00:28,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:00:28,777 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:00:28,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,777 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:00:28,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,777 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:00:28,778 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:00:28,778 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:00:28,778 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37919 2024-11-10T13:00:28,780 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37919 connecting to ZooKeeper ensemble=127.0.0.1:57375 2024-11-10T13:00:28,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379190x0, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:00:28,785 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37919-0x10101f743660000 connected 2024-11-10T13:00:28,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:28,806 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1, hbase.cluster.distributed=false 2024-11-10T13:00:28,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:00:28,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37919 2024-11-10T13:00:28,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37919 2024-11-10T13:00:28,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37919 2024-11-10T13:00:28,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37919 2024-11-10T13:00:28,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37919 2024-11-10T13:00:28,826 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:00:28,827 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:00:28,828 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37299 2024-11-10T13:00:28,830 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37299 connecting to ZooKeeper ensemble=127.0.0.1:57375 2024-11-10T13:00:28,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372990x0, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:00:28,840 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:28,840 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37299-0x10101f743660001 connected 2024-11-10T13:00:28,841 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:00:28,842 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:00:28,842 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:00:28,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:00:28,844 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37299 2024-11-10T13:00:28,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37299 2024-11-10T13:00:28,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37299 2024-11-10T13:00:28,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37299 2024-11-10T13:00:28,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37299 
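The RpcExecutor lines above (handlerCount, read/write queue split) are driven by hbase-site configuration. A rough illustration with the plain Configuration API; the key names below are standard HBase ones, but the values are invented and it is only an assumption that this particular run tuned them this way:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handler threads per region server (the handlerCount seen above).
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Fraction of call queues dedicated to reads, which yields the
        // writeQueues/readQueues split logged by RWQueueRpcExecutor.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
      }
    }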
2024-11-10T13:00:28,865 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:37919 2024-11-10T13:00:28,866 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,37919,1731243628777 2024-11-10T13:00:28,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:28,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:28,868 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,37919,1731243628777 2024-11-10T13:00:28,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:00:28,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:28,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:28,871 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:00:28,872 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,37919,1731243628777 from backup master directory 2024-11-10T13:00:28,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,37919,1731243628777 2024-11-10T13:00:28,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:28,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:28,878 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
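The znode traffic above (a backup-masters entry, then /hbase/master appearing and the backup entry being deleted) is the active-master handover. The same idea in miniature with the plain ZooKeeper client API, where an ephemeral node marks the active master; the paths match the log, everything else is illustrative:

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ActiveMasterSketch {
      // Returns true if this process managed to claim the active-master znode.
      static boolean tryBecomeActive(ZooKeeper zk, String serverName) throws Exception {
        try {
          zk.create("/hbase/master",
              serverName.getBytes(StandardCharsets.UTF_8),
              ZooDefs.Ids.OPEN_ACL_UNSAFE,
              CreateMode.EPHEMERAL);           // vanishes if this process dies
          return true;
        } catch (KeeperException.NodeExistsException e) {
          return false;                        // another master is already active
        }
      }
    }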
2024-11-10T13:00:28,878 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,37919,1731243628777 2024-11-10T13:00:28,886 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/hbase.id] with ID: 1db1ddf0-19ba-4247-bc12-ecadb6518532 2024-11-10T13:00:28,886 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/.tmp/hbase.id 2024-11-10T13:00:28,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:00:28,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:00:28,893 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/.tmp/hbase.id]:[hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/hbase.id] 2024-11-10T13:00:28,907 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:28,907 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:00:28,909 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
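The FSUtils lines above write the new cluster ID to .tmp/hbase.id and then move it onto hbase.id. The same write-then-rename pattern with the plain Hadoop FileSystem API (the paths and UUID below are placeholders, not the ones in this run):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();   // picks up fs.defaultFS
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path target = new Path("/hbase/hbase.id");

        // Write the ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then rename into place; on HDFS the rename is atomic, so readers
        // never observe a half-written hbase.id.
        if (!fs.rename(tmp, target)) {
          throw new IOException("could not move " + tmp + " to " + target);
        }
      }
    }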
2024-11-10T13:00:28,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:28,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:28,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:00:28,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:00:28,924 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:00:28,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:00:28,926 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:00:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:00:28,935 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store 2024-11-10T13:00:28,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:00:28,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:00:28,947 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:28,947 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:00:28,947 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:28,947 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:28,947 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:00:28,947 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:28,947 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
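The master builds the 'master:store' schema above internally, but an equivalent column-family definition can be expressed with the public descriptor builders. A sketch for just the 'info' family, mirroring the settings in the log (the table name here is made up; this is not how the master-local region is actually created):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
                .build())
            .build();
      }
    }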
2024-11-10T13:00:28,948 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243628947Disabling compacts and flushes for region at 1731243628947Disabling writes for close at 1731243628947Writing region close event to WAL at 1731243628947Closed at 1731243628947 2024-11-10T13:00:28,949 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/.initializing 2024-11-10T13:00:28,949 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/WALs/3857ccc89b65,37919,1731243628777 2024-11-10T13:00:28,953 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37919%2C1731243628777, suffix=, logDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/WALs/3857ccc89b65,37919,1731243628777, archiveDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/oldWALs, maxLogs=10 2024-11-10T13:00:28,954 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37919%2C1731243628777.1731243628953 2024-11-10T13:00:28,959 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/WALs/3857ccc89b65,37919,1731243628777/3857ccc89b65%2C37919%2C1731243628777.1731243628953 2024-11-10T13:00:28,964 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-10T13:00:28,965 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:00:28,965 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:28,965 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,965 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:00:28,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:28,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:28,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:00:28,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:28,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:28,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:00:28,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:28,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:28,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:00:28,977 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:28,978 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:28,978 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,979 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,979 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,981 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,981 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,982 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:00:28,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:28,988 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:00:28,988 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806155, jitterRate=0.02507956326007843}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:00:28,989 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243628966Initializing all the Stores at 1731243628967 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243628967Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243628967Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243628967Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243628967Cleaning up temporary data from old regions at 1731243628981 (+14 ms)Region opened successfully at 1731243628989 (+8 ms) 2024-11-10T13:00:28,990 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:00:28,994 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5384651d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:00:28,995 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:00:28,995 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:00:28,995 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:00:28,995 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:00:28,996 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:00:28,996 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:00:28,996 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:00:28,999 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:00:28,999 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:00:29,001 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:00:29,001 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:00:29,002 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:00:29,003 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:00:29,003 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:00:29,004 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:00:29,007 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:00:29,008 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:00:29,009 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:00:29,011 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:00:29,012 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:00:29,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:29,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:29,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,015 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,37919,1731243628777, sessionid=0x10101f743660000, setting cluster-up flag (Was=false) 2024-11-10T13:00:29,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,030 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:00:29,032 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,37919,1731243628777 2024-11-10T13:00:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,041 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:00:29,042 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,37919,1731243628777 2024-11-10T13:00:29,043 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:00:29,045 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:29,045 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:00:29,046 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T13:00:29,046 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,37919,1731243628777 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:00:29,047 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:29,047 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:29,047 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:29,047 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:29,047 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:00:29,048 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,048 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:00:29,048 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:00:29,048 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243659048 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:00:29,049 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,050 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:29,050 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:00:29,050 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:00:29,050 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:00:29,050 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:00:29,051 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,051 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:00:29,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:00:29,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:00:29,052 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243629052,5,FailOnTimeoutGroup] 2024-11-10T13:00:29,053 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243629052,5,FailOnTimeoutGroup] 2024-11-10T13:00:29,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:00:29,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
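[editor's note] The descriptor dump above (families info, ns, rep_barrier and table, each with ROW_INDEX_V1 encoding, ROWCOL bloom filters and in-memory block caching) is the serialized table descriptor that FSTableDescriptors writes to .tabledesc/.tableinfo.0000000001.1321. For readers less used to the dump format, a minimal sketch of how an equivalent column family is declared with the public client API is shown below; the table name "example" is illustrative, not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static void main(String[] args) {
        // Column family mirroring the 'info' family in the dump:
        // 3 versions, ROW_INDEX_V1 block encoding, ROWCOL bloom filter,
        // in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();

        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))   // illustrative table name
                .setColumnFamily(info)
                .build();

        // toString() prints a descriptor in the same {NAME => ...} format as the log dump.
        System.out.println(table);
    }
}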
2024-11-10T13:00:29,055 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(746): ClusterId : 1db1ddf0-19ba-4247-bc12-ecadb6518532 2024-11-10T13:00:29,055 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:00:29,057 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:00:29,057 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:00:29,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:00:29,059 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:00:29,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:00:29,060 DEBUG [RS:0;3857ccc89b65:37299 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c4133e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:00:29,060 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:00:29,060 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1 2024-11-10T13:00:29,067 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:00:29,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:00:29,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:29,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:00:29,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:00:29,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:00:29,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:00:29,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:00:29,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:00:29,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:00:29,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:00:29,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,078 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:37299 2024-11-10T13:00:29,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:00:29,079 INFO [RS:0;3857ccc89b65:37299 {}] 
regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:00:29,079 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:00:29,079 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-10T13:00:29,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740 2024-11-10T13:00:29,080 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,37919,1731243628777 with port=37299, startcode=1731243628826 2024-11-10T13:00:29,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740 2024-11-10T13:00:29,080 DEBUG [RS:0;3857ccc89b65:37299 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:00:29,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:00:29,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:00:29,082 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:00:29,083 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36205, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:00:29,084 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37919 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,084 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37919 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:00:29,086 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1 2024-11-10T13:00:29,086 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41629 2024-11-10T13:00:29,086 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:00:29,089 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:00:29,090 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812427, jitterRate=0.033055007457733154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:00:29,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, 
quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:00:29,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243629068Initializing all the Stores at 1731243629069 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629069Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629069Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243629069Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629069Cleaning up temporary data from old regions at 1731243629082 (+13 ms)Region opened successfully at 1731243629092 (+10 ms) 2024-11-10T13:00:29,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:00:29,092 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:00:29,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:00:29,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:00:29,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:00:29,092 DEBUG [RS:0;3857ccc89b65:37299 {}] zookeeper.ZKUtil(111): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,092 WARN [RS:0;3857ccc89b65:37299 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
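[editor's note] The ZKWatcher entries above are ordinary ZooKeeper watch notifications: master and region server each hold a session against the 127.0.0.1:57375 quorum and receive NodeCreated / NodeChildrenChanged events for paths under /hbase, such as the /hbase/rs children change when the region server's ephemeral znode appears. A minimal sketch with the plain org.apache.zookeeper client, watching /hbase/rs the way the master's RegionServerTracker does, might look like the following; the quorum string and session timeout are placeholders.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsWatchSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum; the test above uses 127.0.0.1:57375.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
            // Session-level events (SyncConnected, Expired, ...) arrive here.
            System.out.println("session event: " + event.getState());
        });

        Watcher childWatcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                // Fires once per change, e.g. NodeChildrenChanged on /hbase/rs
                // when a region server's ephemeral znode is created or removed.
                System.out.println("event: " + event.getType() + " on " + event.getPath());
            }
        };

        // Register a one-shot child watch; real code re-sets the watch each time it fires,
        // which is why the log shows the same paths being watched repeatedly.
        List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
        System.out.println("current region servers: " + servers);
    }
}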
2024-11-10T13:00:29,092 INFO [RS:0;3857ccc89b65:37299 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:29,093 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/WALs/3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,093 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:29,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243629092Disabling compacts and flushes for region at 1731243629092Disabling writes for close at 1731243629092Writing region close event to WAL at 1731243629092Closed at 1731243629092 2024-11-10T13:00:29,093 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,37299,1731243628826] 2024-11-10T13:00:29,095 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:29,095 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:00:29,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:00:29,096 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:00:29,098 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:00:29,099 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:00:29,102 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:00:29,103 INFO [RS:0;3857ccc89b65:37299 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:00:29,103 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,103 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:00:29,104 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:00:29,104 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,105 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:29,106 DEBUG [RS:0;3857ccc89b65:37299 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,108 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37299,1731243628826-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:00:29,126 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:00:29,126 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37299,1731243628826-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,126 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,126 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.Replication(171): 3857ccc89b65,37299,1731243628826 started 2024-11-10T13:00:29,142 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,142 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,37299,1731243628826, RpcServer on 3857ccc89b65/172.17.0.2:37299, sessionid=0x10101f743660001 2024-11-10T13:00:29,142 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:00:29,142 DEBUG [RS:0;3857ccc89b65:37299 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,142 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,37299,1731243628826' 2024-11-10T13:00:29,142 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:00:29,143 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,37299,1731243628826' 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:00:29,144 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:00:29,145 DEBUG [RS:0;3857ccc89b65:37299 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:00:29,145 INFO [RS:0;3857ccc89b65:37299 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:00:29,145 INFO [RS:0;3857ccc89b65:37299 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-10T13:00:29,248 INFO [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37299%2C1731243628826, suffix=, logDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/WALs/3857ccc89b65,37299,1731243628826, archiveDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/oldWALs, maxLogs=32 2024-11-10T13:00:29,248 WARN [3857ccc89b65:37919 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:00:29,250 INFO [RS:0;3857ccc89b65:37299 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37299%2C1731243628826.1731243629249 2024-11-10T13:00:29,257 INFO [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/WALs/3857ccc89b65,37299,1731243628826/3857ccc89b65%2C37299%2C1731243628826.1731243629249 2024-11-10T13:00:29,257 DEBUG [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44775:44775),(127.0.0.1/127.0.0.1:41755:41755)] 2024-11-10T13:00:29,499 DEBUG [3857ccc89b65:37919 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:00:29,499 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,501 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,37299,1731243628826, state=OPENING 2024-11-10T13:00:29,503 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:00:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,505 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:00:29,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:29,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:29,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,37299,1731243628826}] 2024-11-10T13:00:29,659 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:00:29,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35249, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:00:29,666 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:00:29,666 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:29,668 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37299%2C1731243628826.meta, suffix=.meta, logDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/WALs/3857ccc89b65,37299,1731243628826, archiveDir=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/oldWALs, maxLogs=32 2024-11-10T13:00:29,671 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37299%2C1731243628826.meta.1731243629671.meta 2024-11-10T13:00:29,677 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/WALs/3857ccc89b65,37299,1731243628826/3857ccc89b65%2C37299%2C1731243628826.meta.1731243629671.meta 2024-11-10T13:00:29,678 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41755:41755),(127.0.0.1/127.0.0.1:44775:44775)] 2024-11-10T13:00:29,679 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:00:29,679 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:00:29,679 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:00:29,680 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-10T13:00:29,680 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:00:29,680 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:29,680 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:00:29,680 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:00:29,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:00:29,683 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:00:29,683 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:00:29,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:00:29,684 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:00:29,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:00:29,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:29,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:00:29,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:00:29,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:29,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
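[editor's note] At this point the region server is instantiating the four stores (info, ns, rep_barrier, table) of hbase:meta as part of opening the region. Once the region is online, its contents can be read with the ordinary client API; a small sketch, assuming a Configuration that points at the running cluster, is given below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            // Each row of hbase:meta describes one region; the 'info' family
            // holds its server location and serialized RegionInfo.
            try (ResultScanner scanner = meta.getScanner(new Scan())) {
                for (Result row : scanner) {
                    System.out.println(Bytes.toStringBinary(row.getRow()));
                }
            }
        }
    }
}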
2024-11-10T13:00:29,688 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:00:29,689 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740 2024-11-10T13:00:29,690 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740 2024-11-10T13:00:29,691 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:00:29,692 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:00:29,692 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:00:29,693 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:00:29,694 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722047, jitterRate=-0.08187039196491241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:00:29,694 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:00:29,695 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243629680Writing region info on filesystem at 1731243629680Initializing all the Stores at 1731243629681 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629681Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629682 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243629682Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243629682Cleaning up temporary data from old regions at 1731243629692 (+10 ms)Running coprocessor post-open hooks at 1731243629694 (+2 ms)Region opened successfully at 1731243629695 (+1 ms) 2024-11-10T13:00:29,697 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243629658 2024-11-10T13:00:29,699 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:00:29,699 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:00:29,700 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,702 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,37299,1731243628826, state=OPEN 2024-11-10T13:00:29,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:00:29,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:00:29,708 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,708 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:29,708 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:29,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:00:29,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,37299,1731243628826 in 203 msec 2024-11-10T13:00:29,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:00:29,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-10T13:00:29,716 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:29,716 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:00:29,717 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:00:29,717 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,37299,1731243628826, seqNum=-1] 2024-11-10T13:00:29,718 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:00:29,719 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53181, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:00:29,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 681 msec 2024-11-10T13:00:29,726 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243629726, completionTime=-1 2024-11-10T13:00:29,726 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:00:29,726 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243689728 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243749728 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,728 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,729 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:37919, period=300000, unit=MILLISECONDS is enabled. 
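[editor's note] The "fetched meta region location" entries above show the internal connection registry resolving which server hosts hbase:meta (here 3857ccc89b65,37299). Application code obtains the same answer through RegionLocator; a brief sketch, reusing a Connection as in the previous example, follows.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
    // Prints which server hosts each region of hbase:meta,
    // i.e. the same information the registry lookup in the log returns.
    static void printMetaLocations(Connection conn) throws Exception {
        try (RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                System.out.println(loc.getRegion().getRegionNameAsString()
                        + " -> " + loc.getServerName());
            }
        }
    }
}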
2024-11-10T13:00:29,729 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,729 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:29,730 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.855sec 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:00:29,733 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:00:29,736 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:00:29,736 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:00:29,736 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37919,1731243628777-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:29,755 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210f4ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:29,755 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,37919,-1 for getting cluster id 2024-11-10T13:00:29,755 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:00:29,757 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1db1ddf0-19ba-4247-bc12-ecadb6518532' 2024-11-10T13:00:29,758 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:00:29,758 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1db1ddf0-19ba-4247-bc12-ecadb6518532" 2024-11-10T13:00:29,758 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f852b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:29,758 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,37919,-1] 2024-11-10T13:00:29,759 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:00:29,759 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,761 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45862, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:00:29,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:29,762 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:00:29,763 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,37299,1731243628826, seqNum=-1] 2024-11-10T13:00:29,763 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:00:29,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38924, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:00:29,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,37919,1731243628777 2024-11-10T13:00:29,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:29,773 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:00:29,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:00:29,774 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:00:29,774 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:29,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,774 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:00:29,774 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:00:29,774 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2029051909, stopped=false 2024-11-10T13:00:29,774 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,37919,1731243628777 2024-11-10T13:00:29,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:29,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:29,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,776 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:00:29,776 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T13:00:29,776 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:29,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:29,777 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,37299,1731243628826' ***** 2024-11-10T13:00:29,777 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:00:29,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:29,777 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:00:29,777 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:00:29,777 INFO [RS:0;3857ccc89b65:37299 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:00:29,777 INFO [RS:0;3857ccc89b65:37299 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:37299. 2024-11-10T13:00:29,778 DEBUG [RS:0;3857ccc89b65:37299 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:00:29,778 DEBUG [RS:0;3857ccc89b65:37299 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
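
Editor's note: the two call stacks above both originate in the test's tearDown hook, which drives the entire shutdown through HBaseTestingUtil.shutdownMiniCluster(). Below is a minimal sketch of that JUnit 4 setup/teardown pattern; HBaseTestingUtil, startMiniCluster() and shutdownMiniCluster() appear in the log, while the sketch's class name and TEST_UTIL field are assumptions, not code from AbstractTestLogRolling.

// Sketch only: the JUnit 4 lifecycle implied by the call stacks above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Produces the "Minicluster is up" record seen earlier in the log.
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the async connection and stops master, region server, ZK and DFS,
    // the same sequence the shutdown records above walk through.
    TEST_UTIL.shutdownMiniCluster();
  }
}
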
2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:00:29,778 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T13:00:29,779 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T13:00:29,779 DEBUG [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T13:00:29,779 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:00:29,779 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:00:29,779 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:00:29,779 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:00:29,779 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:00:29,779 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-10T13:00:29,798 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/.tmp/ns/dc15f9f656e34b248d2e1cf680f5a699 is 43, key is default/ns:d/1731243629720/Put/seqid=0 2024-11-10T13:00:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741835_1011 (size=5153) 2024-11-10T13:00:29,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741835_1011 (size=5153) 2024-11-10T13:00:29,804 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/.tmp/ns/dc15f9f656e34b248d2e1cf680f5a699 2024-11-10T13:00:29,811 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/.tmp/ns/dc15f9f656e34b248d2e1cf680f5a699 as hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/ns/dc15f9f656e34b248d2e1cf680f5a699 2024-11-10T13:00:29,818 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/ns/dc15f9f656e34b248d2e1cf680f5a699, entries=2, sequenceid=6, filesize=5.0 K 2024-11-10T13:00:29,819 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false 2024-11-10T13:00:29,823 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-10T13:00:29,824 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:00:29,824 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:29,824 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243629779Running coprocessor pre-close hooks at 1731243629779Disabling compacts and flushes for region at 1731243629779Disabling writes for close at 1731243629779Obtaining lock to block concurrent updates at 1731243629779Preparing flush snapshotting stores in 1588230740 at 1731243629779Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731243629779Flushing stores of hbase:meta,,1.1588230740 at 1731243629780 (+1 ms)Flushing 1588230740/ns: creating writer at 1731243629780Flushing 1588230740/ns: appending metadata at 1731243629797 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731243629797Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45a62f1e: reopening flushed file at 1731243629810 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 40ms, sequenceid=6, compaction requested=false at 1731243629819 (+9 ms)Writing region close event to WAL at 1731243629820 (+1 ms)Running coprocessor post-close hooks at 1731243629824 (+4 ms)Closed at 1731243629824 2024-11-10T13:00:29,824 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:29,979 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,37299,1731243628826; all regions closed. 
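
Editor's note: the close journal above shows the pending edits of the meta region's 'ns' family being flushed to an HFile before the region is closed. That flush is triggered internally on the close path; the client-visible equivalent is the Admin flush call, sketched below under the assumption of an already-opened Connection (this illustrates the public API only, not the internal close-time path used above).

// Sketch only: explicitly flushing hbase:meta through the Admin API, forcing
// memstore contents out to HFiles much like the close-time flush in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushMetaSketch {
  static void flushMeta(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
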
2024-11-10T13:00:29,980 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,980 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741834_1010 (size=1152) 2024-11-10T13:00:29,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741834_1010 (size=1152) 2024-11-10T13:00:29,985 DEBUG [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/oldWALs 2024-11-10T13:00:29,985 INFO [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C37299%2C1731243628826.meta:.meta(num 1731243629671) 2024-11-10T13:00:29,986 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,986 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,986 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,986 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:00:29,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:00:29,990 DEBUG [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/oldWALs 2024-11-10T13:00:29,990 INFO [RS:0;3857ccc89b65:37299 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C37299%2C1731243628826:(num 1731243629249) 2024-11-10T13:00:29,990 DEBUG [RS:0;3857ccc89b65:37299 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:29,990 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:00:29,990 INFO [RS:0;3857ccc89b65:37299 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:00:29,991 INFO [RS:0;3857ccc89b65:37299 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:00:29,991 INFO [RS:0;3857ccc89b65:37299 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:00:29,991 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
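
Editor's note: the records above show each FSHLog being closed and its last file archived to the oldWALs directory as part of shutdown. Outside of shutdown, a WAL roll can be requested per region server through the Admin API; the sketch below assumes an open Connection and is an illustration of that public call, not of how AbstractTestLogRolling itself triggers rolls (which is not shown in this log).

// Sketch only: asking every live region server to roll its WAL. A rolled WAL
// whose edits are all flushed becomes eligible for archival to oldWALs, the
// move reported by AbstractFSWAL in the records above.
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class RollWalsSketch {
  static void rollAllWals(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn); // close the current WAL file and start a new one
      }
    }
  }
}
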
2024-11-10T13:00:29,991 INFO [RS:0;3857ccc89b65:37299 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37299 2024-11-10T13:00:29,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:00:29,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,37299,1731243628826 2024-11-10T13:00:29,993 INFO [RS:0;3857ccc89b65:37299 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:00:29,995 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,37299,1731243628826] 2024-11-10T13:00:29,996 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,37299,1731243628826 already deleted, retry=false 2024-11-10T13:00:29,996 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,37299,1731243628826 expired; onlineServers=0 2024-11-10T13:00:29,996 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,37919,1731243628777' ***** 2024-11-10T13:00:29,996 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:00:29,996 INFO [M:0;3857ccc89b65:37919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:00:29,996 INFO [M:0;3857ccc89b65:37919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:00:29,996 DEBUG [M:0;3857ccc89b65:37919 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:00:29,996 DEBUG [M:0;3857ccc89b65:37919 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:00:29,996 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
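
Editor's note: the records above report the deletion of the region server's ephemeral znode under /hbase/rs, which is what lets the master's RegionServerTracker notice the server going away. The sketch below is a generic ZooKeeper client that lists those live-server znodes; the quorum address (127.0.0.1:57375) and base znode (/hbase) are taken from the log, everything else is an assumption and not code from this test.

// Sketch only: listing the live region-server znodes under /hbase/rs, the same
// ephemeral nodes whose deletion is reported in the records above.
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57375", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    try {
      connected.await();
      // Each child is an ephemeral node named host,port,startcode for a live RS.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println);
    } finally {
      zk.close();
    }
  }
}
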
2024-11-10T13:00:29,996 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243629052 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243629052,5,FailOnTimeoutGroup] 2024-11-10T13:00:29,996 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243629052 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243629052,5,FailOnTimeoutGroup] 2024-11-10T13:00:29,997 INFO [M:0;3857ccc89b65:37919 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:00:29,997 INFO [M:0;3857ccc89b65:37919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:00:29,997 DEBUG [M:0;3857ccc89b65:37919 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:00:29,997 INFO [M:0;3857ccc89b65:37919 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:00:29,997 INFO [M:0;3857ccc89b65:37919 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:00:29,997 INFO [M:0;3857ccc89b65:37919 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:00:29,997 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:00:29,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:00:29,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:29,998 DEBUG [M:0;3857ccc89b65:37919 {}] zookeeper.ZKUtil(347): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:00:29,998 WARN [M:0;3857ccc89b65:37919 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:00:29,999 INFO [M:0;3857ccc89b65:37919 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/.lastflushedseqids 2024-11-10T13:00:30,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741836_1012 (size=99) 2024-11-10T13:00:30,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741836_1012 (size=99) 2024-11-10T13:00:30,005 INFO [M:0;3857ccc89b65:37919 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:00:30,006 INFO [M:0;3857ccc89b65:37919 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:00:30,006 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:00:30,006 INFO [M:0;3857ccc89b65:37919 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:30,006 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:30,006 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:00:30,006 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:30,006 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-10T13:00:30,024 DEBUG [M:0;3857ccc89b65:37919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5648925278404917b63ae61329cb1dcb is 82, key is hbase:meta,,1/info:regioninfo/1731243629700/Put/seqid=0 2024-11-10T13:00:30,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741837_1013 (size=5672) 2024-11-10T13:00:30,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741837_1013 (size=5672) 2024-11-10T13:00:30,029 INFO [M:0;3857ccc89b65:37919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5648925278404917b63ae61329cb1dcb 2024-11-10T13:00:30,051 DEBUG [M:0;3857ccc89b65:37919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c0d4bd492cb43a48419c3e383d6a6f6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731243629725/Put/seqid=0 2024-11-10T13:00:30,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741838_1014 (size=5275) 2024-11-10T13:00:30,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741838_1014 (size=5275) 2024-11-10T13:00:30,057 INFO [M:0;3857ccc89b65:37919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c0d4bd492cb43a48419c3e383d6a6f6 2024-11-10T13:00:30,079 DEBUG [M:0;3857ccc89b65:37919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68ab8fd4091a433c9e4d841cb5ce9931 is 69, key is 3857ccc89b65,37299,1731243628826/rs:state/1731243629084/Put/seqid=0 2024-11-10T13:00:30,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741839_1015 (size=5156) 2024-11-10T13:00:30,084 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741839_1015 (size=5156) 2024-11-10T13:00:30,084 INFO [M:0;3857ccc89b65:37919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68ab8fd4091a433c9e4d841cb5ce9931 2024-11-10T13:00:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:30,095 INFO [RS:0;3857ccc89b65:37299 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:00:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37299-0x10101f743660001, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:30,095 INFO [RS:0;3857ccc89b65:37299 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,37299,1731243628826; zookeeper connection closed. 2024-11-10T13:00:30,095 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@471dd4c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@471dd4c4 2024-11-10T13:00:30,096 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:00:30,107 DEBUG [M:0;3857ccc89b65:37919 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c0c73a4d22b40c4b75f8c612d26724f is 52, key is load_balancer_on/state:d/1731243629772/Put/seqid=0 2024-11-10T13:00:30,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741840_1016 (size=5056) 2024-11-10T13:00:30,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741840_1016 (size=5056) 2024-11-10T13:00:30,112 INFO [M:0;3857ccc89b65:37919 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c0c73a4d22b40c4b75f8c612d26724f 2024-11-10T13:00:30,119 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5648925278404917b63ae61329cb1dcb as hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5648925278404917b63ae61329cb1dcb 2024-11-10T13:00:30,124 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5648925278404917b63ae61329cb1dcb, entries=8, sequenceid=29, filesize=5.5 K 2024-11-10T13:00:30,125 DEBUG [M:0;3857ccc89b65:37919 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5c0d4bd492cb43a48419c3e383d6a6f6 as hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5c0d4bd492cb43a48419c3e383d6a6f6 2024-11-10T13:00:30,131 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5c0d4bd492cb43a48419c3e383d6a6f6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-10T13:00:30,132 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/68ab8fd4091a433c9e4d841cb5ce9931 as hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68ab8fd4091a433c9e4d841cb5ce9931 2024-11-10T13:00:30,137 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/68ab8fd4091a433c9e4d841cb5ce9931, entries=1, sequenceid=29, filesize=5.0 K 2024-11-10T13:00:30,138 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7c0c73a4d22b40c4b75f8c612d26724f as hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7c0c73a4d22b40c4b75f8c612d26724f 2024-11-10T13:00:30,144 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41629/user/jenkins/test-data/d01b42c9-c651-1714-66ee-0c83efec71e1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7c0c73a4d22b40c4b75f8c612d26724f, entries=1, sequenceid=29, filesize=4.9 K 2024-11-10T13:00:30,145 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=29, compaction requested=false 2024-11-10T13:00:30,146 INFO [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:30,147 DEBUG [M:0;3857ccc89b65:37919 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243630006Disabling compacts and flushes for region at 1731243630006Disabling writes for close at 1731243630006Obtaining lock to block concurrent updates at 1731243630006Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243630006Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731243630007 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731243630007Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243630007Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243630023 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243630023Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243630035 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243630050 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243630050Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243630063 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243630078 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243630078Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243630090 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243630107 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243630107Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dfff8b4: reopening flushed file at 1731243630118 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b6bb9b8: reopening flushed file at 1731243630125 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f47814: reopening flushed file at 1731243630131 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d0ba370: reopening flushed file at 1731243630137 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=29, compaction requested=false at 1731243630145 (+8 ms)Writing region close event to WAL at 1731243630146 (+1 ms)Closed at 1731243630146 2024-11-10T13:00:30,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:30,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:30,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:30,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:30,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:30,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40121 is added to blk_1073741830_1006 (size=10311) 2024-11-10T13:00:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46083 is added to blk_1073741830_1006 (size=10311) 2024-11-10T13:00:30,150 INFO [M:0;3857ccc89b65:37919 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:00:30,150 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:00:30,150 INFO [M:0;3857ccc89b65:37919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37919 2024-11-10T13:00:30,151 INFO [M:0;3857ccc89b65:37919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:00:30,252 INFO [M:0;3857ccc89b65:37919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:00:30,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:30,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37919-0x10101f743660000, quorum=127.0.0.1:57375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:00:30,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@18492d7d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:30,256 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30ebe7e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:30,256 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:30,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:30,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:30,258 WARN [BP-484755594-172.17.0.2-1731243627948 heartbeating to localhost/127.0.0.1:41629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:30,258 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:00:30,258 WARN [BP-484755594-172.17.0.2-1731243627948 heartbeating to localhost/127.0.0.1:41629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-484755594-172.17.0.2-1731243627948 (Datanode Uuid 1e729ff2-46fc-4247-bb38-497158fe93bf) service to localhost/127.0.0.1:41629 2024-11-10T13:00:30,258 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:30,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data3/current/BP-484755594-172.17.0.2-1731243627948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:30,259 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data4/current/BP-484755594-172.17.0.2-1731243627948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:30,259 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:30,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a15ed6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:30,261 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b5aedfa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:30,261 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:30,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:30,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:30,263 WARN [BP-484755594-172.17.0.2-1731243627948 heartbeating to localhost/127.0.0.1:41629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:30,263 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:00:30,263 WARN [BP-484755594-172.17.0.2-1731243627948 heartbeating to localhost/127.0.0.1:41629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-484755594-172.17.0.2-1731243627948 (Datanode Uuid b6175fe2-3a73-4ed5-b908-b5291f10ad62) service to localhost/127.0.0.1:41629 2024-11-10T13:00:30,263 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:30,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data1/current/BP-484755594-172.17.0.2-1731243627948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:30,264 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/cluster_5ebc9fdf-c17d-8bbb-a4d0-6f22d9f44e26/data/data2/current/BP-484755594-172.17.0.2-1731243627948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:30,264 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:30,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1edca743{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:00:30,270 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:30,270 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:30,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:30,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:30,276 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.log.dir so I do NOT create it in target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa 2024-11-10T13:00:30,292 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4f73c49f-92be-712f-b706-5c3ddea78f5f/hadoop.tmp.dir so I do NOT create it in target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7, deleteOnExit=true 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/test.cache.data in system properties and HBase conf 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:00:30,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:00:30,293 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:00:30,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:00:30,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:00:30,308 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:00:30,383 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:30,388 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:30,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:30,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:30,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:00:30,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:30,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:30,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:30,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c00ef51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-42473-hadoop-hdfs-3_4_1-tests_jar-_-any-11547445857983717821/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:00:30,517 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:42473} 2024-11-10T13:00:30,517 INFO [Time-limited test {}] server.Server(415): Started @103488ms 2024-11-10T13:00:30,532 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:00:30,606 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:30,610 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:30,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:30,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:30,610 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:00:30,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:30,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:30,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de86657{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-36583-hadoop-hdfs-3_4_1-tests_jar-_-any-14881242590825973330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:30,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:36583} 2024-11-10T13:00:30,727 INFO [Time-limited test {}] server.Server(415): Started @103697ms 2024-11-10T13:00:30,728 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:00:30,762 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:30,766 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:30,766 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:30,766 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:30,766 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:00:30,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:30,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:30,851 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data1/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:30,851 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data2/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:30,877 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:00:30,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe96be853a0916ab with lease ID 0x25040637d64d630f: Processing first storage report for DS-4fde2194-6593-44d9-9bb3-4066b71ca62b from datanode DatanodeRegistration(127.0.0.1:40725, datanodeUuid=37e29012-4976-4502-bb9a-d48f62b8c2f4, infoPort=38983, infoSecurePort=0, ipcPort=43009, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:30,880 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe96be853a0916ab with lease ID 0x25040637d64d630f: from storage DS-4fde2194-6593-44d9-9bb3-4066b71ca62b node DatanodeRegistration(127.0.0.1:40725, datanodeUuid=37e29012-4976-4502-bb9a-d48f62b8c2f4, infoPort=38983, infoSecurePort=0, ipcPort=43009, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:30,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe96be853a0916ab with lease ID 0x25040637d64d630f: Processing first storage report for DS-f55b1a4a-5865-41be-ac2c-80cb349681f4 from datanode DatanodeRegistration(127.0.0.1:40725, datanodeUuid=37e29012-4976-4502-bb9a-d48f62b8c2f4, infoPort=38983, infoSecurePort=0, ipcPort=43009, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:30,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe96be853a0916ab with lease ID 0x25040637d64d630f: from storage DS-f55b1a4a-5865-41be-ac2c-80cb349681f4 node DatanodeRegistration(127.0.0.1:40725, datanodeUuid=37e29012-4976-4502-bb9a-d48f62b8c2f4, infoPort=38983, infoSecurePort=0, ipcPort=43009, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:30,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f2859b3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-37233-hadoop-hdfs-3_4_1-tests_jar-_-any-7623707685568504885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:30,901 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:37233} 2024-11-10T13:00:30,901 INFO [Time-limited test {}] server.Server(415): Started @103871ms 2024-11-10T13:00:30,902 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
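The DataNode registrations and first block reports above come from the mini-DFS that the test utility restarts right after the earlier shutdown (the log prints StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}). The following is only a minimal sketch of how such a restart is usually driven from test code, assuming the HBase 3.x test API named in the log (HBaseTestingUtil, StartMiniClusterOption); it is not taken from the test that produced this output.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option printed in the log: 1 master, 1 region server, 2 data nodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // brings up mini ZK, mini DFS and the HBase master/region server
    try {
      // ... exercise the cluster here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen earlier
    }
  }
}

Starting the minicluster wires the mini ZooKeeper quorum, mini DFS and the HBase daemons together, which is why the surrounding log interleaves namenode, datanode, Jetty and ZooKeeper messages.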
2024-11-10T13:00:31,004 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:31,004 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:31,022 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:00:31,024 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2f874292e25d7c2 with lease ID 0x25040637d64d6310: Processing first storage report for DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c from datanode DatanodeRegistration(127.0.0.1:35391, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=39589, infoSecurePort=0, ipcPort=42481, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:31,024 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2f874292e25d7c2 with lease ID 0x25040637d64d6310: from storage DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c node DatanodeRegistration(127.0.0.1:35391, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=39589, infoSecurePort=0, ipcPort=42481, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:31,025 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb2f874292e25d7c2 with lease ID 0x25040637d64d6310: Processing first storage report for DS-681b34e1-0a0d-4916-a216-ed7f44fc852a from datanode DatanodeRegistration(127.0.0.1:35391, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=39589, infoSecurePort=0, ipcPort=42481, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:31,025 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb2f874292e25d7c2 with lease ID 0x25040637d64d6310: from storage DS-681b34e1-0a0d-4916-a216-ed7f44fc852a node DatanodeRegistration(127.0.0.1:35391, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=39589, infoSecurePort=0, ipcPort=42481, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:00:31,031 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa 2024-11-10T13:00:31,034 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/zookeeper_0, clientPort=56510, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:00:31,034 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56510 2024-11-10T13:00:31,035 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:00:31,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:00:31,048 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de with version=8 2024-11-10T13:00:31,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:00:31,050 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:00:31,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,050 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:00:31,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:00:31,051 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:00:31,051 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:00:31,051 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44143 2024-11-10T13:00:31,053 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44143 connecting to ZooKeeper ensemble=127.0.0.1:56510 2024-11-10T13:00:31,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:441430x0, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:00:31,060 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44143-0x10101f74c490000 connected 2024-11-10T13:00:31,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,079 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:31,080 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de, hbase.cluster.distributed=false 2024-11-10T13:00:31,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:00:31,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44143 2024-11-10T13:00:31,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44143 2024-11-10T13:00:31,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44143 2024-11-10T13:00:31,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44143 2024-11-10T13:00:31,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44143 2024-11-10T13:00:31,099 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:00:31,099 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:00:31,100 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44789 2024-11-10T13:00:31,101 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44789 connecting to ZooKeeper ensemble=127.0.0.1:56510 2024-11-10T13:00:31,102 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:31,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447890x0, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:00:31,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44789-0x10101f74c490001 connected 2024-11-10T13:00:31,108 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:00:31,108 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:00:31,109 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:00:31,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:00:31,110 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:00:31,111 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:00:31,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44789 2024-11-10T13:00:31,111 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44789 2024-11-10T13:00:31,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44789 2024-11-10T13:00:31,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44789 2024-11-10T13:00:31,114 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44789 2024-11-10T13:00:31,126 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:44143 2024-11-10T13:00:31,127 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:31,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:31,129 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:00:31,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,131 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:00:31,132 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,44143,1731243631050 from backup master directory 2024-11-10T13:00:31,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:31,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:00:31,135 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
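The ZKWatcher/ZKUtil lines above show the master and region server setting watches on znodes such as /hbase/master and /hbase/backup-masters, including znodes that do not exist yet. Outside HBase's internal ZKWatcher wrapper, the same watch-a-possibly-missing-znode pattern looks roughly like this with the plain Apache ZooKeeper client; the quorum address 127.0.0.1:56510 is the one printed by MiniZooKeeperCluster above, everything else is illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Print every event delivered to the default watcher (connection state changes, node events).
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("ZK event " + event.getType() + " state=" + event.getState() + " path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56510", 30000, watcher);

    // exists() registers the watch whether or not the znode is there and returns null if it is absent,
    // which is how a "Set watcher on znode that does not yet exist" situation is handled.
    Stat stat = zk.exists("/hbase/master", true);
    System.out.println("/hbase/master " + (stat == null ? "not created yet" : "exists"));

    zk.close();
  }
}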
2024-11-10T13:00:31,135 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,44143,1731243631050
2024-11-10T13:00:31,139 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/hbase.id] with ID: 6ccc734d-58e5-4f29-8d18-ddb8531ae540
2024-11-10T13:00:31,139 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/.tmp/hbase.id
2024-11-10T13:00:31,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741826_1002 (size=42)
2024-11-10T13:00:31,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741826_1002 (size=42)
2024-11-10T13:00:31,146 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/.tmp/hbase.id]:[hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/hbase.id]
2024-11-10T13:00:31,161 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T13:00:31,161 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-10T13:00:31,162 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
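FSUtils above creates the cluster ID by writing hbase.id to a temporary path under .tmp and then moving it to its final location, so no reader can observe a half-written file. Below is a simplified sketch of that write-then-rename pattern using the Hadoop FileSystem API; it writes a plain UUID string rather than HBase's encoded ClusterId, and the root directory is hypothetical.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical root dir; the run above used hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de
    Path rootDir = new Path("hdfs://localhost:8020/user/demo/test-data");
    FileSystem fs = rootDir.getFileSystem(new Configuration());

    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write to a temporary file first ...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // ... then move it into place with a single rename, matching the FSUtils(625)/FSUtils(634) steps above.
    if (!fs.rename(tmpId, finalId)) {
      throw new IllegalStateException("rename " + tmpId + " -> " + finalId + " failed");
    }
  }
}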
2024-11-10T13:00:31,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:00:31,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:00:31,177 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:00:31,178 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:00:31,178 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:31,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:00:31,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:00:31,187 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store 2024-11-10T13:00:31,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:00:31,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:00:31,194 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:31,194 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:00:31,195 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:31,195 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:31,195 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:00:31,195 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:00:31,195 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
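The master:store descriptor printed above belongs to an internal region managed by MasterRegion, not to a table created through the client, but its four column families (info with three versions, IN_MEMORY and 8 KB ROW_INDEX_V1 blocks; proc, rs and state with the single-version 64 KB defaults) map directly onto the public descriptor builders. Purely as an illustration of how an equivalent descriptor could be assembled, and not how MasterRegion actually builds it:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' roughly as printed above: 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    // 'proc', 'rs' and 'state' use the defaults shown in the log (1 version, 64 KB blocks, no encoding).
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info);
    for (String family : new String[] {"proc", "rs", "state"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    }
    TableDescriptor descriptor = builder.build();
    System.out.println(descriptor);
  }
}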
2024-11-10T13:00:31,195 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243631194Disabling compacts and flushes for region at 1731243631194Disabling writes for close at 1731243631195 (+1 ms)Writing region close event to WAL at 1731243631195Closed at 1731243631195 2024-11-10T13:00:31,196 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/.initializing 2024-11-10T13:00:31,196 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,199 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C44143%2C1731243631050, suffix=, logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050, archiveDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/oldWALs, maxLogs=10 2024-11-10T13:00:31,199 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44143%2C1731243631050.1731243631199 2024-11-10T13:00:31,205 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 2024-11-10T13:00:31,209 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38983:38983),(127.0.0.1/127.0.0.1:39589:39589)] 2024-11-10T13:00:31,212 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:00:31,212 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:31,212 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,212 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:00:31,216 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:00:31,218 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:31,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:00:31,221 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:31,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:00:31,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:31,223 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,224 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,224 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,226 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,226 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,226 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:00:31,228 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:00:31,230 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:00:31,231 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830056, jitterRate=0.05547149479389191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:00:31,232 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243631212Initializing all the Stores at 1731243631213 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631213Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243631215 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243631215Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243631215Cleaning up temporary data from old regions at 1731243631226 (+11 ms)Region opened successfully at 1731243631232 (+6 ms) 2024-11-10T13:00:31,233 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:00:31,236 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59772180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:00:31,237 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:00:31,238 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:00:31,238 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:00:31,238 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:00:31,238 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:00:31,239 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:00:31,239 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:00:31,241 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:00:31,242 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:00:31,244 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:00:31,244 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:00:31,245 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:00:31,246 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:00:31,247 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:00:31,248 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:00:31,250 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:00:31,251 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:00:31,252 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:00:31,255 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:00:31,256 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:00:31,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:31,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:00:31,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,258 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,44143,1731243631050, sessionid=0x10101f74c490000, setting cluster-up flag (Was=false) 2024-11-10T13:00:31,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,268 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:00:31,269 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,278 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:00:31,279 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,44143,1731243631050 2024-11-10T13:00:31,281 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:00:31,283 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:31,283 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:00:31,283 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T13:00:31,284 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,44143,1731243631050 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:00:31,285 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243661286 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:00:31,286 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,287 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:00:31,287 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:00:31,287 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:00:31,287 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:31,287 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:00:31,287 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:00:31,288 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:00:31,288 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243631288,5,FailOnTimeoutGroup] 2024-11-10T13:00:31,288 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243631288,5,FailOnTimeoutGroup] 2024-11-10T13:00:31,288 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,288 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:00:31,288 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,288 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,288 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:00:31,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:00:31,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:00:31,300 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:00:31,300 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de 2024-11-10T13:00:31,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:00:31,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:00:31,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:31,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:00:31,310 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:00:31,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:00:31,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:00:31,312 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:00:31,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:00:31,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:00:31,316 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(746): ClusterId : 6ccc734d-58e5-4f29-8d18-ddb8531ae540 2024-11-10T13:00:31,316 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:00:31,316 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:00:31,317 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:00:31,318 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740 2024-11-10T13:00:31,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740 2024-11-10T13:00:31,319 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:00:31,319 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:00:31,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:00:31,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:00:31,321 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-10T13:00:31,322 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:00:31,323 DEBUG [RS:0;3857ccc89b65:44789 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dc23149, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:00:31,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:00:31,326 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:00:31,326 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859640, jitterRate=0.09308880567550659}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:00:31,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243631307Initializing all the Stores at 1731243631308 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631308Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631308Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243631308Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631308Cleaning up temporary data from old regions at 1731243631321 (+13 ms)Region opened successfully at 1731243631327 (+6 ms) 2024-11-10T13:00:31,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:00:31,327 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:00:31,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:00:31,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-11-10T13:00:31,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:00:31,328 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:00:31,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243631327Disabling compacts and flushes for region at 1731243631327Disabling writes for close at 1731243631327Writing region close event to WAL at 1731243631328 (+1 ms)Closed at 1731243631328 2024-11-10T13:00:31,330 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:31,330 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:00:31,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:00:31,331 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:00:31,333 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:00:31,341 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:44789 2024-11-10T13:00:31,341 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:00:31,341 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:00:31,341 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T13:00:31,342 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,44143,1731243631050 with port=44789, startcode=1731243631098 2024-11-10T13:00:31,342 DEBUG [RS:0;3857ccc89b65:44789 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:00:31,344 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44731, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:00:31,345 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44143 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,345 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44143 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,347 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de 2024-11-10T13:00:31,347 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35903 2024-11-10T13:00:31,347 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:00:31,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:00:31,354 DEBUG [RS:0;3857ccc89b65:44789 {}] zookeeper.ZKUtil(111): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,355 WARN [RS:0;3857ccc89b65:44789 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:00:31,355 INFO [RS:0;3857ccc89b65:44789 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:31,355 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,355 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,44789,1731243631098] 2024-11-10T13:00:31,358 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:00:31,361 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:00:31,362 INFO [RS:0;3857ccc89b65:44789 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:00:31,362 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:31,362 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:00:31,363 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:00:31,363 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,363 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,364 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,364 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,364 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:31,364 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:31,364 DEBUG [RS:0;3857ccc89b65:44789 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,364 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44789,1731243631098-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:00:31,380 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:00:31,381 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44789,1731243631098-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,381 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,381 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.Replication(171): 3857ccc89b65,44789,1731243631098 started 2024-11-10T13:00:31,396 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,396 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,44789,1731243631098, RpcServer on 3857ccc89b65/172.17.0.2:44789, sessionid=0x10101f74c490001 2024-11-10T13:00:31,396 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:00:31,396 DEBUG [RS:0;3857ccc89b65:44789 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,396 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,44789,1731243631098' 2024-11-10T13:00:31,396 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,44789,1731243631098' 2024-11-10T13:00:31,397 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:00:31,398 DEBUG 
[RS:0;3857ccc89b65:44789 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:00:31,398 DEBUG [RS:0;3857ccc89b65:44789 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:00:31,398 INFO [RS:0;3857ccc89b65:44789 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:00:31,398 INFO [RS:0;3857ccc89b65:44789 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:00:31,483 WARN [3857ccc89b65:44143 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:00:31,501 INFO [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C44789%2C1731243631098, suffix=, logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098, archiveDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs, maxLogs=32 2024-11-10T13:00:31,502 INFO [RS:0;3857ccc89b65:44789 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243631502 2024-11-10T13:00:31,509 INFO [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 2024-11-10T13:00:31,513 DEBUG [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39589:39589),(127.0.0.1/127.0.0.1:38983:38983)] 2024-11-10T13:00:31,733 DEBUG [3857ccc89b65:44143 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:00:31,734 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,735 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,44789,1731243631098, state=OPENING 2024-11-10T13:00:31,737 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:00:31,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:00:31,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:31,739 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:00:31,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:31,739 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,44789,1731243631098}] 2024-11-10T13:00:31,891 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:00:31,894 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36935, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:00:31,898 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:00:31,898 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:31,900 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C44789%2C1731243631098.meta, suffix=.meta, logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098, archiveDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs, maxLogs=32 2024-11-10T13:00:31,901 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta 2024-11-10T13:00:31,906 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta 2024-11-10T13:00:31,907 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39589:39589),(127.0.0.1/127.0.0.1:38983:38983)] 2024-11-10T13:00:31,907 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:00:31,908 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:00:31,908 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:00:31,908 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
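The RegionServerRpcQuotaManager(64) and RegionServerSpaceQuotaManager(80) records a little further down show quota support switched off for this run. A hedged sketch of flipping that switch, assuming the documented hbase.quota.enabled key is what governs it (the key name comes from HBase documentation, not from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaSwitchSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: enabling this single switch is what lets the RPC and space quota managers start.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
    }
}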
2024-11-10T13:00:31,908 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:00:31,908 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:31,909 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:00:31,909 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:00:31,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:00:31,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:00:31,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:00:31,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:00:31,913 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:00:31,914 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:00:31,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:00:31,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:00:31,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:00:31,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:31,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
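The repeated CompactionConfiguration(183) dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2) reflect default compaction tuning for each column family. A hedged sketch of overriding those three knobs, assuming the usual keys hbase.hstore.compaction.min, hbase.hstore.compaction.max and hbase.hstore.compaction.ratio (key names taken from HBase documentation, not from this log; the values are examples only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Example values that widen the file-count window relative to the 3/10/1.2 defaults logged above.
        conf.setInt("hbase.hstore.compaction.min", 5);
        conf.setInt("hbase.hstore.compaction.max", 12);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.5f);
        System.out.println(conf.getInt("hbase.hstore.compaction.min", -1) + " / "
                + conf.getInt("hbase.hstore.compaction.max", -1) + " / "
                + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
    }
}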
2024-11-10T13:00:31,917 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:00:31,917 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740 2024-11-10T13:00:31,919 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740 2024-11-10T13:00:31,920 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:00:31,921 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:00:31,921 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:00:31,922 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:00:31,923 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706186, jitterRate=-0.10203903913497925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:00:31,923 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:00:31,924 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243631909Writing region info on filesystem at 1731243631909Initializing all the Stores at 1731243631910 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631910Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631910Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243631910Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243631910Cleaning up temporary data from old regions at 1731243631921 (+11 ms)Running coprocessor post-open hooks at 1731243631923 (+2 ms)Region opened successfully at 1731243631924 (+1 ms) 2024-11-10T13:00:31,926 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243631891 2024-11-10T13:00:31,928 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:00:31,928 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:00:31,930 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,931 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,44789,1731243631098, state=OPEN 2024-11-10T13:00:31,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:00:31,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:00:31,937 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:31,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:31,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:00:31,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:00:31,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,44789,1731243631098 in 198 msec 2024-11-10T13:00:31,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:00:31,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-10T13:00:31,944 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:00:31,944 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:00:31,946 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:00:31,946 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,44789,1731243631098, seqNum=-1] 2024-11-10T13:00:31,946 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:00:31,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41719, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:00:31,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 670 msec 2024-11-10T13:00:31,953 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243631953, completionTime=-1 2024-11-10T13:00:31,953 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:00:31,953 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243691955 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243751955 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,955 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,956 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:44143, period=300000, unit=MILLISECONDS is enabled. 
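InitMetaProcedure reports above that it is about to create the 'default' and 'hbase' namespaces. A minimal way to confirm they exist from a client of this mini-cluster is the standard Admin namespace API, sketched below; the connection is assumed to already point at the cluster, and the class name is illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ListNamespaces {
  // Prints the namespaces the master just created ('default' and 'hbase').
  // 'conn' is assumed to be an open Connection to this cluster.
  static void printNamespaces(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}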
2024-11-10T13:00:31,956 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,956 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:31,957 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:00:31,959 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.824sec 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:00:31,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:00:31,962 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:00:31,962 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:00:31,962 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44143,1731243631050-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:32,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c8d161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:32,016 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,44143,-1 for getting cluster id 2024-11-10T13:00:32,016 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:00:32,018 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6ccc734d-58e5-4f29-8d18-ddb8531ae540' 2024-11-10T13:00:32,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:00:32,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6ccc734d-58e5-4f29-8d18-ddb8531ae540" 2024-11-10T13:00:32,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1535ec62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:32,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,44143,-1] 2024-11-10T13:00:32,019 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:00:32,020 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:00:32,021 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:00:32,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:00:32,022 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:00:32,023 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,44789,1731243631098, seqNum=-1] 2024-11-10T13:00:32,024 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:00:32,025 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34918, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:00:32,027 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,44143,1731243631050 2024-11-10T13:00:32,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:32,031 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:00:32,055 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:00:32,055 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:00:32,056 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38535 2024-11-10T13:00:32,057 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38535 connecting to ZooKeeper ensemble=127.0.0.1:56510 2024-11-10T13:00:32,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:32,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:00:32,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385350x0, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:00:32,065 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38535-0x10101f74c490002 connected 2024-11-10T13:00:32,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-10T13:00:32,065 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-10T13:00:32,066 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:00:32,066 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
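The second region server's startup lines above (three default RPC handlers, ZooKeeper ensemble 127.0.0.1:56510 under baseZNode /hbase, an 880 MB block cache) are driven by ordinary HBase configuration keys. The sketch below sets those keys programmatically; the values mirror what the log reports, but how the test actually derives them is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MiniClusterConfSketch {
  static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble host from the log
    conf.setInt("hbase.zookeeper.property.clientPort", 56510); // ensemble port from the log
    conf.set("zookeeper.znode.parent", "/hbase");              // baseZNode in the ZKWatcher lines
    conf.setInt("hbase.regionserver.handler.count", 3);        // default.FPBQ.Fifo handlerCount=3
    conf.setFloat("hfile.block.cache.size", 0.4f);             // fraction of heap; ~880 MB here
    return conf;
  }
}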
2024-11-10T13:00:32,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:00:32,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:00:32,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38535 2024-11-10T13:00:32,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38535 2024-11-10T13:00:32,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38535 2024-11-10T13:00:32,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38535 2024-11-10T13:00:32,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38535 2024-11-10T13:00:32,098 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(746): ClusterId : 6ccc734d-58e5-4f29-8d18-ddb8531ae540 2024-11-10T13:00:32,098 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:00:32,100 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:00:32,100 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:00:32,102 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:00:32,103 DEBUG [RS:1;3857ccc89b65:38535 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7257e1ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:00:32,115 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3857ccc89b65:38535 2024-11-10T13:00:32,115 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:00:32,115 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:00:32,115 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T13:00:32,116 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,44143,1731243631050 with port=38535, startcode=1731243632054 2024-11-10T13:00:32,116 DEBUG [RS:1;3857ccc89b65:38535 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:00:32,118 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33757, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:00:32,119 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44143 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,119 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44143 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,121 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de 2024-11-10T13:00:32,121 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35903 2024-11-10T13:00:32,121 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:00:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:00:32,125 DEBUG [RS:1;3857ccc89b65:38535 {}] zookeeper.ZKUtil(111): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,125 WARN [RS:1;3857ccc89b65:38535 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:00:32,125 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,38535,1731243632054] 2024-11-10T13:00:32,125 INFO [RS:1;3857ccc89b65:38535 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:00:32,125 DEBUG [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,129 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:00:32,131 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:00:32,131 INFO [RS:1;3857ccc89b65:38535 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:00:32,131 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:32,132 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:00:32,132 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:00:32,132 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,132 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:32,133 DEBUG [RS:1;3857ccc89b65:38535 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,136 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,38535,1731243632054-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:00:32,151 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:00:32,151 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,38535,1731243632054-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,152 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,152 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.Replication(171): 3857ccc89b65,38535,1731243632054 started 2024-11-10T13:00:32,165 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:00:32,165 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,38535,1731243632054, RpcServer on 3857ccc89b65/172.17.0.2:38535, sessionid=0x10101f74c490002 2024-11-10T13:00:32,165 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:00:32,165 DEBUG [RS:1;3857ccc89b65:38535 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3857ccc89b65:38535,5,FailOnTimeoutGroup] 2024-11-10T13:00:32,165 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,38535,1731243632054' 2024-11-10T13:00:32,166 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:00:32,166 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-10T13:00:32,166 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:00:32,166 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:00:32,166 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
3857ccc89b65,38535,1731243632054 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,38535,1731243632054' 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:00:32,167 DEBUG [RS:1;3857ccc89b65:38535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:00:32,167 INFO [RS:1;3857ccc89b65:38535 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:00:32,167 INFO [RS:1;3857ccc89b65:38535 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:00:32,167 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3857ccc89b65,44143,1731243631050 2024-11-10T13:00:32,168 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4fa8e196 2024-11-10T13:00:32,168 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:00:32,169 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:00:32,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T13:00:32,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
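The two TableDescriptorChecker warnings above are expected here: the test deliberately shrinks the region max file size (786432 bytes) and the memstore flush size (8192 bytes) so that flushes and WAL rolls happen quickly. Assuming those values are injected through the shared Configuration (the property names are the ones quoted in the warnings), the setup looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SmallRegionConfSketch {
  static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.max.filesize", 786_432L);      // ~768 KB before split checks
    conf.setLong("hbase.hregion.memstore.flush.size", 8_192L); // flush after ~8 KB of writes
    return conf;
  }
}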
2024-11-10T13:00:32,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:00:32,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T13:00:32,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:00:32,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-10T13:00:32,174 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:32,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:00:32,175 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:00:32,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741835_1011 (size=393) 2024-11-10T13:00:32,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741835_1011 (size=393) 2024-11-10T13:00:32,186 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 420a6a06ae441e38de6228ffc018d528, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de 2024-11-10T13:00:32,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35391 is added to blk_1073741836_1012 (size=76) 2024-11-10T13:00:32,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40725 is added to blk_1073741836_1012 (size=76) 2024-11-10T13:00:32,195 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:32,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 420a6a06ae441e38de6228ffc018d528, disabling compactions & flushes 2024-11-10T13:00:32,196 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. after waiting 0 ms 2024-11-10T13:00:32,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,196 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,196 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 420a6a06ae441e38de6228ffc018d528: Waiting for close lock at 1731243632195Disabling compacts and flushes for region at 1731243632195Disabling writes for close at 1731243632196 (+1 ms)Writing region close event to WAL at 1731243632196Closed at 1731243632196 2024-11-10T13:00:32,198 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:00:32,198 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731243632198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243632198"}]},"ts":"1731243632198"} 2024-11-10T13:00:32,200 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
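CreateTableProcedure pid=4 above corresponds to a plain createTable request for a single 'info' family with one version and a ROW bloom filter. A client-side equivalent with the standard Admin/TableDescriptorBuilder API is sketched below; the test itself drives this through its testing utility, so treat the sketch as illustrative rather than the test's code.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build())
          .build();
      admin.createTable(td);  // the master logs this as a CreateTableProcedure (pid=4 above)
    }
  }
}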
2024-11-10T13:00:32,201 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:00:32,202 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243632201"}]},"ts":"1731243632201"} 2024-11-10T13:00:32,204 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-10T13:00:32,204 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=420a6a06ae441e38de6228ffc018d528, ASSIGN}] 2024-11-10T13:00:32,205 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=420a6a06ae441e38de6228ffc018d528, ASSIGN 2024-11-10T13:00:32,206 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=420a6a06ae441e38de6228ffc018d528, ASSIGN; state=OFFLINE, location=3857ccc89b65,44789,1731243631098; forceNewPlan=false, retain=false 2024-11-10T13:00:32,270 INFO [RS:1;3857ccc89b65:38535 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C38535%2C1731243632054, suffix=, logDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054, archiveDir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs, maxLogs=32 2024-11-10T13:00:32,271 INFO [RS:1;3857ccc89b65:38535 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C38535%2C1731243632054.1731243632271 2024-11-10T13:00:32,278 INFO [RS:1;3857ccc89b65:38535 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 2024-11-10T13:00:32,278 DEBUG [RS:1;3857ccc89b65:38535 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39589:39589),(127.0.0.1/127.0.0.1:38983:38983)] 2024-11-10T13:00:32,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-10T13:00:32,357 INFO [3857ccc89b65:44143 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
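The AbstractFSWAL line above shows the WAL sizing for the new region server: block size 256 MB, roll size 128 MB (block size times the roll multiplier, 0.5 by default), and at most 32 logs before forced rolling. The sketch below sets the corresponding configuration keys explicitly; the values are the defaults implied by the log, and the exact keys are stated here as an assumption for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalSizingSketch {
  static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at blocksize * 0.5
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // cap on un-archived WALs
    return conf;
  }
}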
2024-11-10T13:00:32,358 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=420a6a06ae441e38de6228ffc018d528, regionState=OPENING, regionLocation=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:32,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=420a6a06ae441e38de6228ffc018d528, ASSIGN because future has completed 2024-11-10T13:00:32,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 420a6a06ae441e38de6228ffc018d528, server=3857ccc89b65,44789,1731243631098}] 2024-11-10T13:00:32,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:32,376 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:32,519 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,520 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 420a6a06ae441e38de6228ffc018d528, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:00:32,520 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,520 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:00:32,520 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,520 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,522 INFO [StoreOpener-420a6a06ae441e38de6228ffc018d528-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,523 INFO [StoreOpener-420a6a06ae441e38de6228ffc018d528-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 420a6a06ae441e38de6228ffc018d528 columnFamilyName info 2024-11-10T13:00:32,524 DEBUG [StoreOpener-420a6a06ae441e38de6228ffc018d528-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:00:32,524 INFO [StoreOpener-420a6a06ae441e38de6228ffc018d528-1 {}] regionserver.HStore(327): Store=420a6a06ae441e38de6228ffc018d528/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:00:32,524 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,525 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,525 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,526 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,526 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,528 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,530 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:00:32,530 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 420a6a06ae441e38de6228ffc018d528; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736193, jitterRate=-0.06388232111930847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:00:32,530 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:32,531 DEBUG 
[RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 420a6a06ae441e38de6228ffc018d528: Running coprocessor pre-open hook at 1731243632521Writing region info on filesystem at 1731243632521Initializing all the Stores at 1731243632521Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243632522 (+1 ms)Cleaning up temporary data from old regions at 1731243632526 (+4 ms)Running coprocessor post-open hooks at 1731243632530 (+4 ms)Region opened successfully at 1731243632531 (+1 ms) 2024-11-10T13:00:32,532 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528., pid=6, masterSystemTime=1731243632515 2024-11-10T13:00:32,534 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,534 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:32,535 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=420a6a06ae441e38de6228ffc018d528, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,44789,1731243631098 2024-11-10T13:00:32,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 420a6a06ae441e38de6228ffc018d528, server=3857ccc89b65,44789,1731243631098 because future has completed 2024-11-10T13:00:32,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:00:32,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 420a6a06ae441e38de6228ffc018d528, server=3857ccc89b65,44789,1731243631098 in 178 msec 2024-11-10T13:00:32,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:00:32,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=420a6a06ae441e38de6228ffc018d528, ASSIGN in 338 msec 2024-11-10T13:00:32,546 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:00:32,546 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243632546"}]},"ts":"1731243632546"} 
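With the region open (journal above), the test can begin writing; each Put appends an edit to the WAL created earlier, which is what eventually exercises the log rolling under test. The sketch below shows generic writes of this kind, not the test's actual data or row keys.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteSomeRows {
  // Writes a few small rows to the 'info' family of the test table.
  // 'conn' is assumed to be an open Connection to this mini-cluster.
  static void write(Connection conn) throws IOException {
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Table table = conn.getTable(name)) {
      for (int i = 0; i < 10; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        table.put(put);  // each put becomes a WAL edit on the hosting region server
      }
    }
  }
}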
2024-11-10T13:00:32,549 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-10T13:00:32,550 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:00:32,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 380 msec 2024-11-10T13:00:32,887 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:00:32,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:32,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:32,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:32,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:37,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:00:37,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:37,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:37,837 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:37,838 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:00:37,847 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T13:00:37,847 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-10T13:00:42,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44143 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:00:42,193 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-10T13:00:42,193 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-10T13:00:42,197 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T13:00:42,197 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:42,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:42,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:42,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:42,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:42,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:00:42,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:42,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:42,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T13:00:42,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-10T13:00:42,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@730725ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-37329-hadoop-hdfs-3_4_1-tests_jar-_-any-11741670430605924385/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:42,328 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:37329} 2024-11-10T13:00:42,328 INFO [Time-limited test {}] server.Server(415): Started @115299ms 2024-11-10T13:00:42,329 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:00:42,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:42,364 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:42,365 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:42,365 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:42,365 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:00:42,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:42,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:42,427 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data5/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,427 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data6/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,444 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:00:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e1b7620eda4ac8 with lease ID 0x25040637d64d6311: Processing first storage report for DS-b5f80209-4e09-49c3-8a4c-611ce85f484d from datanode DatanodeRegistration(127.0.0.1:33513, datanodeUuid=bfc87161-86af-4fa0-8a78-daabb5d93818, infoPort=34759, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e1b7620eda4ac8 with lease ID 0x25040637d64d6311: from storage DS-b5f80209-4e09-49c3-8a4c-611ce85f484d node DatanodeRegistration(127.0.0.1:33513, datanodeUuid=bfc87161-86af-4fa0-8a78-daabb5d93818, infoPort=34759, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6e1b7620eda4ac8 with lease ID 0x25040637d64d6311: Processing first storage report for DS-aaac7a58-abff-4c0f-9dca-4c9dad116257 from datanode DatanodeRegistration(127.0.0.1:33513, datanodeUuid=bfc87161-86af-4fa0-8a78-daabb5d93818, infoPort=34759, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6e1b7620eda4ac8 with lease ID 0x25040637d64d6311: from storage DS-aaac7a58-abff-4c0f-9dca-4c9dad116257 node DatanodeRegistration(127.0.0.1:33513, datanodeUuid=bfc87161-86af-4fa0-8a78-daabb5d93818, infoPort=34759, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4438143d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-33873-hadoop-hdfs-3_4_1-tests_jar-_-any-15885191816401193037/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:42,491 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:33873} 2024-11-10T13:00:42,491 INFO [Time-limited test {}] server.Server(415): Started @115462ms 2024-11-10T13:00:42,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:00:42,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:42,539 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:42,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:42,540 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:42,540 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:00:42,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:42,542 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:42,592 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,593 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,613 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:00:42,615 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3717fdd14b7728fa with lease ID 0x25040637d64d6312: Processing first storage report for DS-6280841e-9118-428f-9e2a-14ca17e63f55 from datanode DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3717fdd14b7728fa with lease ID 0x25040637d64d6312: from storage DS-6280841e-9118-428f-9e2a-14ca17e63f55 node DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,615 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3717fdd14b7728fa with lease ID 0x25040637d64d6312: Processing first storage report for DS-21babd5f-a543-4e45-a715-36165a014494 from datanode DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3717fdd14b7728fa with lease ID 0x25040637d64d6312: from storage DS-21babd5f-a543-4e45-a715-36165a014494 node DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c81b75d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-39203-hadoop-hdfs-3_4_1-tests_jar-_-any-14431637361501952656/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:42,662 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:39203} 2024-11-10T13:00:42,662 INFO [Time-limited test {}] server.Server(415): Started @115633ms 2024-11-10T13:00:42,663 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
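The Jetty startups and first block reports above appear to be additional datanodes being brought up inside the test's mini HDFS cluster before the original pipeline nodes are taken down. A minimal sketch of that step, assuming the stock MiniDFSCluster API; the helper class name and the count of two nodes are illustrative, not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class AddDataNodesSketch {
      // Grow an already-running mini cluster by two datanodes and wait until they have
      // registered with the NameNode (the "Processing first storage report" lines above).
      static void addTwoDataNodes(MiniDFSCluster dfsCluster, Configuration conf) throws Exception {
        dfsCluster.startDataNodes(conf, 2, true /* manageDfsDirs */, null /* default startup */, null /* no racks */);
        dfsCluster.waitActive();
      }
    }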
2024-11-10T13:00:42,760 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data9/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,760 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data10/current/BP-925157896-172.17.0.2-1731243630326/current, will proceed with Du for space computation calculation, 2024-11-10T13:00:42,776 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:00:42,779 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ba52d6305baa0d6 with lease ID 0x25040637d64d6313: Processing first storage report for DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96 from datanode DatanodeRegistration(127.0.0.1:40561, datanodeUuid=f52f7a8d-9889-4b75-bd6b-ac75ae457111, infoPort=40061, infoSecurePort=0, ipcPort=42555, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ba52d6305baa0d6 with lease ID 0x25040637d64d6313: from storage DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96 node DatanodeRegistration(127.0.0.1:40561, datanodeUuid=f52f7a8d-9889-4b75-bd6b-ac75ae457111, infoPort=40061, infoSecurePort=0, ipcPort=42555, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,779 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ba52d6305baa0d6 with lease ID 0x25040637d64d6313: Processing first storage report for DS-173d013d-8557-405c-b6c9-6283d7495547 from datanode DatanodeRegistration(127.0.0.1:40561, datanodeUuid=f52f7a8d-9889-4b75-bd6b-ac75ae457111, infoPort=40061, infoSecurePort=0, ipcPort=42555, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326) 2024-11-10T13:00:42,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ba52d6305baa0d6 with lease ID 0x25040637d64d6313: from storage DS-173d013d-8557-405c-b6c9-6283d7495547 node DatanodeRegistration(127.0.0.1:40561, datanodeUuid=f52f7a8d-9889-4b75-bd6b-ac75ae457111, infoPort=40061, infoSecurePort=0, ipcPort=42555, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:42,783 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,783 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,783 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,783 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,783 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 block BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:42,783 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta block BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 
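The "Error Recovery ... datanode 0 ... is bad" lines are the HDFS client deciding what to do with a write pipeline that has just lost a node. That choice is governed by client-side configuration; a sketch of the relevant knobs with illustrative values, not necessarily the values this test uses:

    import org.apache.hadoop.conf.Configuration;

    final class PipelineRecoveryConfSketch {
      static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        // Whether to try replacing a failed datanode in the write pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces for larger pipelines or appended/flushed blocks; ALWAYS and NEVER are the alternatives.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If replacement fails, keep writing to the surviving nodes instead of aborting outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }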
2024-11-10T13:00:42,784 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 block BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:42,784 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 block BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:42,784 WARN [PacketResponder: BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35391] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:42,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34348 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34348 dst: /127.0.0.1:35391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34360 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34360 dst: /127.0.0.1:35391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:37822 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37822 dst: /127.0.0.1:40725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
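The DataXceiver errors on 127.0.0.1:35391 and 127.0.0.1:40725 (ClosedChannelException, "Premature EOF from inputStream") are consistent with those datanodes being shut down while write streams were still open, which is the scenario this test exercises. A sketch of that teardown step, assuming the MiniDFSCluster API; the index 0 and class name are illustrative:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class KillDataNodeSketch {
      // Shut down one datanode while writers still have pipelines through it; this is
      // the kind of event that produces the WRITE_BLOCK errors logged above.
      static void killFirstDataNode(MiniDFSCluster dfsCluster) {
        dfsCluster.stopDataNode(0);
      }
    }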
2024-11-10T13:00:42,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f2859b3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:42,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161219291_22 at /127.0.0.1:34388 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34388 dst: /127.0.0.1:35391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-161219291_22 at /127.0.0.1:37872 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37872 dst: /127.0.0.1:40725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:37840 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37840 dst: /127.0.0.1:40725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:34318 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35391:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34318 dst: /127.0.0.1:35391 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:37846 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37846 dst: /127.0.0.1:40725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
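On the client side, none of these datanode-side errors become visible until the writer next pushes data through its stream. A sketch of where they surface, using only the generic Hadoop FileSystem API; the method and variable names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;

    final class WriterSideSketch {
      // The pipeline failures above show up here as an IOException from hflush()/close(),
      // e.g. the "All datanodes [...] are bad. Aborting..." seen further down in this log.
      static void appendAndFlush(FSDataOutputStream out, byte[] edit) throws IOException {
        out.write(edit);
        out.hflush(); // forces the packet through the pipeline; throws if every replica is gone
      }
    }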
2024-11-10T13:00:42,787 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b918d2a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:42,787 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:42,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eee535{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:42,787 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66182b08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:42,788 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:42,788 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid 07be666f-232a-4760-aea5-e1fb3b4f419c) service to localhost/127.0.0.1:35903 2024-11-10T13:00:42,788 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:00:42,788 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:42,789 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:42,789 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:42,790 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:42,790 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta block BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,790 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 block BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,790 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 block BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,792 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 block BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,797 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de86657{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:42,797 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6787773a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:42,798 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:42,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a107105{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:42,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:42,799 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:42,799 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid 37e29012-4976-4502-bb9a-d48f62b8c2f4) service to localhost/127.0.0.1:35903 2024-11-10T13:00:42,799 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): 
Command processor encountered interrupt and exit. 2024-11-10T13:00:42,799 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:42,800 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data1/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:42,800 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data2/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:42,800 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:42,804 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528., hostname=3857ccc89b65,44789,1731243631098, seqNum=2] 2024-11-10T13:00:42,806 ERROR [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de-prefix:3857ccc89b65,44789,1731243631098 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,806 WARN [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de-prefix:3857ccc89b65,44789,1731243631098 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
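Once appendAndSync fails with "All datanodes ... are bad", the recovery path is to roll to a new WAL file and therefore a new block pipeline, which the log roller does automatically in the next lines. The same roll can also be requested explicitly through the Admin API; a sketch assuming an open Connection, with illustrative names:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class ForceWalRollSketch {
      // Ask a specific region server to close its current WAL and open a new one.
      static void forceRoll(Connection conn, ServerName regionServer) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(regionServer);
        }
      }
    }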
2024-11-10T13:00:42,806 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,806 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44789%2C1731243631098:(num 1731243631502) roll requested 2024-11-10T13:00:42,806 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243642806 2024-11-10T13:00:42,809 WARN [Thread-901 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,809 WARN [Thread-901 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 
2024-11-10T13:00:42,809 WARN [Thread-901 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741838_1018 2024-11-10T13:00:42,812 WARN [Thread-901 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:42,818 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:42,818 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:42,818 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:42,818 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:42,818 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:42,818 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 2024-11-10T13:00:42,819 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:42,819 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
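After a roll like the one above ("Rolled WAL ... new WAL ...1731243642806"), the natural check is that the WAL is now backed by a different file. A sketch of that assertion, assuming the AbstractFSWAL type referenced elsewhere in this log; the class and parameter names are illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;

    final class WalRolledCheckSketch {
      // The post-roll path ("...1731243642806") must differ from the pre-roll path
      // ("...1731243631502"); otherwise the roll did not take effect.
      static void verifyRolled(AbstractFSWAL<?> log, Path fileBeforeRoll) {
        Path fileAfterRoll = log.getCurrentFileName();
        if (fileAfterRoll.equals(fileBeforeRoll)) {
          throw new AssertionError("expected the WAL to roll to a new file");
        }
      }
    }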
2024-11-10T13:00:42,820 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-10T13:00:42,820 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-10T13:00:42,820 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 2024-11-10T13:00:42,822 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40061:40061),(127.0.0.1/127.0.0.1:34759:34759)] 2024-11-10T13:00:42,822 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:42,823 WARN [IPC Server handler 0 on default port 35903 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-10T13:00:42,826 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 after 5ms 2024-11-10T13:00:43,179 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:44,137 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
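The RecoverLeaseFSUtils entries above probe the NameNode for the abandoned WAL file and retry while recovery is still in progress (attempt=0 after 5ms here, attempt=1 after roughly 4 seconds later in the log). A minimal sketch of that retry loop against the public DistributedFileSystem.recoverLease API; the attempt limit and pause are illustrative, not the values HBase actually uses.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Repeatedly asks the NameNode to recover the lease on a file a dead writer
   * left open, returning once the file is closed or attempts run out.
   */
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease returns true when the file is already closed,
      // i.e. lease recovery has completed on the NameNode.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      // Otherwise recovery is still in progress ("RecoveryId = ..." above),
      // so pause before asking again, as the attempt=0 / attempt=1 lines show.
      Thread.sleep(pauseMs);
    }
    return false;
  }
}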
2024-11-10T13:00:44,823 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:44,824 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 2024-11-10T13:00:44,825 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:44,825 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 block BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:44,825 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:58392 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:40561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58392 dst: /127.0.0.1:40561 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:44,826 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:32900 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:33513:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32900 dst: /127.0.0.1:33513 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:44,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c81b75d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:44,828 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:44,828 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:44,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:44,828 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:44,831 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:44,831 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:00:44,831 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid f52f7a8d-9889-4b75-bd6b-ac75ae457111) service to localhost/127.0.0.1:35903 2024-11-10T13:00:44,831 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:44,832 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data9/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:44,832 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data10/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:44,832 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:45,179 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:46,137 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:46,823 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:46,824 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]] 2024-11-10T13:00:46,824 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44789%2C1731243631098:(num 1731243642806) roll requested 2024-11-10T13:00:46,824 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243646824 2024-11-10T13:00:46,827 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 after 4007ms 2024-11-10T13:00:46,828 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:46,828 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:46,828 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741840_1022 2024-11-10T13:00:46,829 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:46,833 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:46,833 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:46,833 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:46,833 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:46,833 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:46,833 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243646824 2024-11-10T13:00:46,834 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46285:46285),(127.0.0.1/127.0.0.1:34759:34759)] 2024-11-10T13:00:46,834 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:46,834 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 is not closed yet, will try archiving it next time 2024-11-10T13:00:46,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33513 is added to blk_1073741839_1021 (size=2431) 2024-11-10T13:00:46,836 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:00:47,179 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:47,236 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:48,137 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:48,835 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:48,840 WARN [ResponseProcessor for block BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
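The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" line above reflects a check of the current write pipeline against a minimum replication threshold. A sketch of that check under stated assumptions: currentPipeline() and requestLogRoll() are hypothetical stand-ins for the WAL's internals, not real methods.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public abstract class LowReplicationCheckSketch {
  private final int minReplication;

  protected LowReplicationCheckSketch(int minReplication) {
    this.minReplication = minReplication;
  }

  /** Hypothetical accessor for the datanodes currently in the output pipeline. */
  protected abstract DatanodeInfo[] currentPipeline();

  /** Hypothetical hook that asks the log roller to close and replace this WAL. */
  protected abstract void requestLogRoll();

  /** Requests a roll when replicas have dropped below the configured minimum. */
  public void checkLowReplication() {
    DatanodeInfo[] pipeline = currentPipeline();
    if (pipeline.length < minReplication) {
      System.err.printf(
          "HDFS pipeline error detected. Found %d replicas but expecting no less than %d."
              + " Requesting close of WAL.%n", pipeline.length, minReplication);
      requestLogRoll();
    }
  }
}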
2024-11-10T13:00:48,841 WARN [DataStreamer for file /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243646824 block BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:48,841 WARN [PacketResponder: BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33513] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:48,841 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34368 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34368 dst: /127.0.0.1:34495 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:48,841 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:32930 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33513:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32930 dst: /127.0.0.1:33513 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:48,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@730725ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:48,843 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:00:48,843 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:00:48,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:00:48,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:00:48,844 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:00:48,844 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid bfc87161-86af-4fa0-8a78-daabb5d93818) service to localhost/127.0.0.1:35903 2024-11-10T13:00:48,844 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
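The Jetty and BPServiceActor shutdown messages above come from the test (TestLogRolling-testLogRollOnDatanodeDeath, per the data paths) stopping datanodes while writes are in flight. A hedged sketch of how such a test might kill a datanode with the public MiniDFSCluster API; the WAL writes and assertions around it are omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)   // small pipeline so a single death is visible immediately
        .build();
    try {
      cluster.waitActive();
      // Stop the first datanode without a clean handoff; in-flight pipelines
      // to it start failing with ConnectException / EOF, as the log shows.
      cluster.stopDataNode(0);
      // ... the real test writes WAL entries here and asserts that a roll happens ...
    } finally {
      cluster.shutdown();
    }
  }
}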
2024-11-10T13:00:48,844 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:00:48,845 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data5/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:48,845 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data6/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:00:48,845 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:00:48,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:48,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T13:00:48,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/376868b067c14a7798ad69e9fc5094fc is 1080, key is row0002/info:/1731243644833/Put/seqid=0 2024-11-10T13:00:48,875 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:00:48,875 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34398 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025 to mirror 127.0.0.1:40561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:48,875 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:48,876 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025 2024-11-10T13:00:48,876 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34398 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:48,876 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34398 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34398 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:48,876 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:48,877 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:48,877 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:48,878 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741843_1026 2024-11-10T13:00:48,878 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:48,879 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:48,879 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:48,879 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741844_1027 2024-11-10T13:00:48,880 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:48,881 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:48,881 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 
2024-11-10T13:00:48,881 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741845_1028 2024-11-10T13:00:48,882 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:48,882 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:48,882 WARN [IPC Server handler 0 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:48,882 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741846_1029 (size=10347) 2024-11-10T13:00:49,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
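The repeated "Abandoning blk_..." / "Excluding datanode ..." entries above show the client discarding a block and excluding each unreachable datanode until the NameNode can no longer place enough replicas. A purely illustrative sketch of that abandon-and-exclude loop; allocateBlock, connectToFirstDatanode, abandonBlock, and firstDatanodeOf are hypothetical stand-ins for DFSClient/DataStreamer internals, not real API calls.

import java.io.IOException;
import java.net.ConnectException;
import java.util.HashSet;
import java.util.Set;

public abstract class ExcludeAndRetrySketch {
  /** Hypothetical: ask the NameNode for a new block, avoiding excluded nodes. */
  protected abstract String allocateBlock(Set<String> excludedDatanodes) throws IOException;

  /** Hypothetical: open the write pipeline to the block's first datanode. */
  protected abstract void connectToFirstDatanode(String block) throws ConnectException;

  /** Hypothetical: tell the NameNode to forget a block we could not write. */
  protected abstract void abandonBlock(String block) throws IOException;

  /** Hypothetical: the first datanode chosen for the block, used for exclusion. */
  protected abstract String firstDatanodeOf(String block);

  public String setupPipeline(int maxAttempts) throws IOException {
    Set<String> excluded = new HashSet<>();
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      String block = allocateBlock(excluded);   // fails once placement is impossible
      try {
        connectToFirstDatanode(block);
        return block;                           // pipeline established
      } catch (ConnectException e) {
        // Mirror the log: abandon the block, then exclude the unreachable node.
        abandonBlock(block);
        excluded.add(firstDatanodeOf(block));
      }
    }
    throw new IOException("Unable to create new block after " + maxAttempts + " attempts");
  }
}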
2024-11-10T13:00:49,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/376868b067c14a7798ad69e9fc5094fc 2024-11-10T13:00:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/376868b067c14a7798ad69e9fc5094fc as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc 2024-11-10T13:00:49,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc, entries=5, sequenceid=11, filesize=10.1 K 2024-11-10T13:00:49,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 420a6a06ae441e38de6228ffc018d528 in 446ms, sequenceid=11, compaction requested=false 2024-11-10T13:00:49,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:49,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:49,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-10T13:00:49,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/07e85aef0cc7405598c4a2dbcf5522b8 is 1080, key is row0007/info:/1731243648856/Put/seqid=0 2024-11-10T13:00:49,484 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35391 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
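The flush above writes the new HFile under the region's .tmp directory and then commits it by moving it into the column family's store directory ("Committing ... as ..."). A minimal sketch of that write-then-rename commit step with the plain FileSystem API; the paths and surrounding HFile writer machinery are placeholders.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FlushCommitSketch {
  /**
   * Commits a flushed file: it was written under .tmp first so readers never
   * observe a partially written HFile, then a rename publishes it into the
   * store directory in a single filesystem operation.
   */
  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir)
      throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}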
2024-11-10T13:00:49,485 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:49,484 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34420 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030 to mirror 127.0.0.1:35391 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:49,485 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030 2024-11-10T13:00:49,485 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34420 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:49,485 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34420 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34420 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:49,485 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:49,486 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:49,486 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:49,486 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741848_1031 2024-11-10T13:00:49,487 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:49,488 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:49,488 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:49,488 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741849_1032 2024-11-10T13:00:49,489 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:49,489 WARN [Thread-925 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:49,490 WARN [Thread-925 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 
2024-11-10T13:00:49,490 WARN [Thread-925 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741850_1033 2024-11-10T13:00:49,490 WARN [Thread-925 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:49,491 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:49,491 WARN [IPC Server handler 0 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:49,491 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:49,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741851_1034 (size=12506) 2024-11-10T13:00:49,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/07e85aef0cc7405598c4a2dbcf5522b8 2024-11-10T13:00:49,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/07e85aef0cc7405598c4a2dbcf5522b8 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8 2024-11-10T13:00:49,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8, entries=7, sequenceid=24, filesize=12.2 K 2024-11-10T13:00:49,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 420a6a06ae441e38de6228ffc018d528 in 430ms, sequenceid=24, compaction requested=false 2024-11-10T13:00:49,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:49,908 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-10T13:00:49,908 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:49,908 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8 because midkey is the same as first or last row 2024-11-10T13:00:50,137 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,835 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,835 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]] 2024-11-10T13:00:50,835 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44789%2C1731243631098:(num 1731243646824) roll requested 2024-11-10T13:00:50,836 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243650835 2024-11-10T13:00:50,839 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,839 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:50,839 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741852_1035 2024-11-10T13:00:50,839 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:50,841 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,841 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:50,841 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741853_1036 2024-11-10T13:00:50,841 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:50,842 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,842 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:50,842 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741854_1037 2024-11-10T13:00:50,843 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:50,844 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,844 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 
2024-11-10T13:00:50,845 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741855_1038 2024-11-10T13:00:50,845 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:50,846 WARN [IPC Server handler 3 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:50,846 WARN [IPC Server handler 3 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:50,846 WARN [IPC Server handler 3 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:50,848 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:50,848 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:50,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:50,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:50,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:50,849 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243646824 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243650835 2024-11-10T13:00:50,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741841_1024 (size=25992) 2024-11-10T13:00:50,852 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46285:46285)] 2024-11-10T13:00:50,853 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:50,853 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243646824 is not closed yet, 
will try archiving it next time 2024-11-10T13:00:50,853 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243642806 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44789%2C1731243631098.1731243642806 2024-11-10T13:00:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:50,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T13:00:50,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/05d0109d1e78435c93eec29e57f1dd78 is 1079, key is tmprow/info:/1731243650894/Put/seqid=0 2024-11-10T13:00:50,901 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,902 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:50,902 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741857_1040 2024-11-10T13:00:50,902 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:50,903 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,903 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:50,903 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741858_1041 2024-11-10T13:00:50,904 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:50,905 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,905 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:50,905 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741859_1042 2024-11-10T13:00:50,905 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:50,906 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:50,907 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:50,907 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741860_1043 2024-11-10T13:00:50,907 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:50,908 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:50,908 WARN [IPC Server handler 0 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:50,908 WARN [IPC Server handler 0 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:50,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741861_1044 (size=6027) 2024-11-10T13:00:51,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:51,251 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:51,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/05d0109d1e78435c93eec29e57f1dd78 2024-11-10T13:00:51,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/05d0109d1e78435c93eec29e57f1dd78 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78 2024-11-10T13:00:51,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78, entries=1, sequenceid=34, filesize=5.9 K 2024-11-10T13:00:51,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 420a6a06ae441e38de6228ffc018d528 in 429ms, sequenceid=34, compaction requested=true 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8 because midkey is the same as first or last row 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 420a6a06ae441e38de6228ffc018d528:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:00:51,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:51,325 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:00:51,326 DEBUG 
[RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:00:51,326 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1541): 420a6a06ae441e38de6228ffc018d528/info is initiating minor compaction (all files) 2024-11-10T13:00:51,326 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 420a6a06ae441e38de6228ffc018d528/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:51,327 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78] into tmpdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp, totalSize=28.2 K 2024-11-10T13:00:51,327 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 376868b067c14a7798ad69e9fc5094fc, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731243644833 2024-11-10T13:00:51,327 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 07e85aef0cc7405598c4a2dbcf5522b8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731243648856 2024-11-10T13:00:51,328 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05d0109d1e78435c93eec29e57f1dd78, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731243650894 2024-11-10T13:00:51,340 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 420a6a06ae441e38de6228ffc018d528#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:00:51,340 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/56750e6709c448c180f63ea6df91676a is 1080, key is row0002/info:/1731243644833/Put/seqid=0 2024-11-10T13:00:51,342 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:51,342 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:51,342 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741862_1045 2024-11-10T13:00:51,343 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:51,344 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:51,344 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:51,344 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741863_1046 2024-11-10T13:00:51,345 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:51,346 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:51,346 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:51,346 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741864_1047 2024-11-10T13:00:51,347 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:51,349 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:51,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60698 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:51,349 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:51,349 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048 2024-11-10T13:00:51,349 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60698 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:51,349 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60698 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60698 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:51,349 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:51,350 WARN [IPC Server handler 1 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:51,350 WARN [IPC Server handler 1 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:51,350 WARN [IPC Server handler 1 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:51,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741866_1049 (size=17994) 2024-11-10T13:00:51,626 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9f37e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741846_1029 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:51,626 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@575bb0f2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741851_1034 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:51,760 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/56750e6709c448c180f63ea6df91676a as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a 2024-11-10T13:00:51,767 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 420a6a06ae441e38de6228ffc018d528/info of 420a6a06ae441e38de6228ffc018d528 into 56750e6709c448c180f63ea6df91676a(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:51,767 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528., storeName=420a6a06ae441e38de6228ffc018d528/info, priority=13, startTime=1731243651325; duration=0sec 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a because midkey is the same as first or last row 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a because midkey is the same as first or last row 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-10T13:00:51,767 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:51,768 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a because midkey is the same as first or last row 2024-11-10T13:00:51,768 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:51,768 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 420a6a06ae441e38de6228ffc018d528:info 2024-11-10T13:00:52,138 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:52,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T13:00:52,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/751690d8d228490c83c7e0abfbe81e08 is 1079, key is tmprow/info:/1731243652312/Put/seqid=0 2024-11-10T13:00:52,320 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,320 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:52,320 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741867_1050 2024-11-10T13:00:52,321 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:52,322 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,322 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:52,322 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741868_1051 2024-11-10T13:00:52,323 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:52,324 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,324 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:52,324 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741869_1052 2024-11-10T13:00:52,325 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:52,326 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,326 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:52,326 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741870_1053 2024-11-10T13:00:52,327 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:52,327 WARN [IPC Server handler 3 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:52,327 WARN [IPC Server handler 3 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:52,327 WARN [IPC Server handler 3 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:52,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741871_1054 (size=6027) 2024-11-10T13:00:52,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/751690d8d228490c83c7e0abfbe81e08 2024-11-10T13:00:52,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/751690d8d228490c83c7e0abfbe81e08 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08 2024-11-10T13:00:52,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08, entries=1, sequenceid=45, filesize=5.9 K 2024-11-10T13:00:52,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 420a6a06ae441e38de6228ffc018d528 in 30ms, sequenceid=45, compaction requested=false 2024-11-10T13:00:52,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:52,344 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-10T13:00:52,344 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:52,344 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a because midkey is the same as first or last row 2024-11-10T13:00:52,616 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9f37e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741861_1044 to 127.0.0.1:40561 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:52,616 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@575bb0f2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741841_1024 to 127.0.0.1:40725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:52,853 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,853 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]] 2024-11-10T13:00:52,853 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44789%2C1731243631098:(num 1731243650835) roll requested 2024-11-10T13:00:52,854 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243652854 2024-11-10T13:00:52,857 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,857 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60726 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055 to mirror 127.0.0.1:40561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:52,858 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:52,858 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055 2024-11-10T13:00:52,858 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60726 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T13:00:52,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60726 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60726 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:52,858 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:52,859 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,860 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:52,860 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741873_1056 2024-11-10T13:00:52,860 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:52,862 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,862 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60732 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:52,862 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:52,862 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057 2024-11-10T13:00:52,862 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60732 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T13:00:52,862 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60732 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60732 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:52,862 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:52,863 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:52,863 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 
2024-11-10T13:00:52,863 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741875_1058 2024-11-10T13:00:52,864 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:52,864 WARN [IPC Server handler 2 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:52,864 WARN [IPC Server handler 2 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:52,865 WARN [IPC Server handler 2 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:52,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:52,867 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:52,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:52,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:52,867 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:00:52,867 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243650835 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243652854 2024-11-10T13:00:52,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741856_1039 (size=13591) 2024-11-10T13:00:52,870 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46285:46285)] 2024-11-10T13:00:52,870 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:52,870 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243650835 is not closed yet, 
will try archiving it next time 2024-11-10T13:00:52,871 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243646824 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44789%2C1731243631098.1731243646824 2024-11-10T13:00:53,180 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:53,270 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 is not closed yet, will try archiving it next time 2024-11-10T13:00:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:00:53,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T13:00:53,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/340053af235f4b7bb4f408fe10459238 is 1079, key is tmprow/info:/1731243653731/Put/seqid=0 2024-11-10T13:00:53,738 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:00:53,738 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:53,738 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741877_1060 2024-11-10T13:00:53,739 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:53,740 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:53,740 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:53,740 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741878_1061 2024-11-10T13:00:53,741 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:53,742 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:53,742 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:53,742 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741879_1062 2024-11-10T13:00:53,742 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:53,744 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:53,744 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60738 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:53,745 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:53,745 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063 2024-11-10T13:00:53,745 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60738 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:53,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60738 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60738 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:53,745 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:53,746 WARN [IPC Server handler 1 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:53,746 WARN [IPC Server handler 1 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:53,746 WARN [IPC Server handler 1 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:53,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741881_1064 (size=6027) 2024-11-10T13:00:54,138 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:00:54,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/340053af235f4b7bb4f408fe10459238 2024-11-10T13:00:54,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/340053af235f4b7bb4f408fe10459238 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238 2024-11-10T13:00:54,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238, entries=1, sequenceid=55, filesize=5.9 K 2024-11-10T13:00:54,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 420a6a06ae441e38de6228ffc018d528 in 430ms, sequenceid=55, compaction requested=true 2024-11-10T13:00:54,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:54,162 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-10T13:00:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a because midkey is the same as first or last row 2024-11-10T13:00:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 420a6a06ae441e38de6228ffc018d528:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:00:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:54,163 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:00:54,164 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:00:54,164 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1541): 420a6a06ae441e38de6228ffc018d528/info is initiating minor compaction (all files) 2024-11-10T13:00:54,164 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
420a6a06ae441e38de6228ffc018d528/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:00:54,164 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238] into tmpdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp, totalSize=29.3 K 2024-11-10T13:00:54,165 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 56750e6709c448c180f63ea6df91676a, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731243644833 2024-11-10T13:00:54,165 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 751690d8d228490c83c7e0abfbe81e08, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731243652312 2024-11-10T13:00:54,165 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 340053af235f4b7bb4f408fe10459238, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731243653731 2024-11-10T13:00:54,179 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 420a6a06ae441e38de6228ffc018d528#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:00:54,179 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/9f9e0d8d74dc438bbc30b2205e6686bf is 1080, key is row0002/info:/1731243644833/Put/seqid=0 2024-11-10T13:00:54,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60760 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065 to mirror 127.0.0.1:40561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,182 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40561 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:54,182 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60760 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:54,182 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 2024-11-10T13:00:54,182 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065 2024-11-10T13:00:54,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60760 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60760 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,183 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:00:54,184 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:54,184 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60774 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066 to mirror 127.0.0.1:40725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:54,184 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:00:54,185 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066 2024-11-10T13:00:54,185 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60774 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:54,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60774 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60774 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,185 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:00:54,187 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35391 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:00:54,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60782 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067 to mirror 127.0.0.1:35391 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,187 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]) is bad. 2024-11-10T13:00:54,187 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067 2024-11-10T13:00:54,187 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60782 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:54,187 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60782 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60782 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,187 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35391,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK] 2024-11-10T13:00:54,189 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:54,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60786 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:54,189 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:00:54,189 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068 2024-11-10T13:00:54,189 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60786 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:00:54,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:60786 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60786 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:00:54,190 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:00:54,190 WARN [IPC Server handler 2 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-10T13:00:54,190 WARN [IPC Server handler 2 on default port 35903 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-10T13:00:54,190 WARN [IPC Server handler 2 on default port 35903 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-10T13:00:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741886_1069 (size=18097) 2024-11-10T13:00:54,600 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/9f9e0d8d74dc438bbc30b2205e6686bf as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf 2024-11-10T13:00:54,607 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 420a6a06ae441e38de6228ffc018d528/info of 420a6a06ae441e38de6228ffc018d528 into 9f9e0d8d74dc438bbc30b2205e6686bf(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:00:54,607 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528., storeName=420a6a06ae441e38de6228ffc018d528/info, priority=13, startTime=1731243654163; duration=0sec 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf because midkey is the same as first or last row 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf because midkey is the same as first or last row 2024-11-10T13:00:54,607 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-10T13:00:54,608 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:00:54,608 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf because midkey is the same as first or last row 2024-11-10T13:00:54,608 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:00:54,608 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 420a6a06ae441e38de6228ffc018d528:info 2024-11-10T13:00:54,615 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@575bb0f2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer 
BP-925157896-172.17.0.2-1731243630326:blk_1073741866_1049 to 127.0.0.1:35391 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,615 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9f37e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741871_1054 to 127.0.0.1:40561 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:54,871 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:54,871 WARN [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-10T13:00:54,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:00:54,958 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:00:54,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:00:54,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:00:54,958 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:00:54,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:00:54,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:00:55,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c79190f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/java.io.tmpdir/jetty-localhost-44195-hadoop-hdfs-3_4_1-tests_jar-_-any-10806010746451965345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:00:55,073 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:44195} 2024-11-10T13:00:55,073 INFO [Time-limited test {}] server.Server(415): Started @128044ms 2024-11-10T13:00:55,075 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:00:55,174 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:00:55,181 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:00:55,182 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b9119c7b42763fb with lease ID 0x25040637d64d6314: from storage DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c node DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:00:55,182 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b9119c7b42763fb with lease ID 0x25040637d64d6314: from storage DS-681b34e1-0a0d-4916-a216-ed7f44fc852a node DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:00:55,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741856_1039 (size=13591) 2024-11-10T13:00:55,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741881_1064 (size=6027) 2024-11-10T13:00:56,138 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:56,871 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:57,181 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:57,616 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9f37e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741886_1069 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:00:58,139 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:58,871 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:00:59,182 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:00,139 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:00,872 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,031 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T13:01:01,182 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,288 ERROR [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData-prefix:3857ccc89b65,44143,1731243631050 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,288 WARN [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData-prefix:3857ccc89b65,44143,1731243631050 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,288 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44143%2C1731243631050:(num 1731243631199) roll requested 2024-11-10T13:01:01,288 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44143%2C1731243631050.1731243661288 2024-11-10T13:01:01,291 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,291 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK], DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK]) is bad. 
2024-11-10T13:01:01,291 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741887_1070 2024-11-10T13:01:01,292 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40561,DS-ce5a4fa7-c07a-4ea4-8fa2-dd4afc17cb96,DISK] 2024-11-10T13:01:01,293 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,293 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK], DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]) is bad. 2024-11-10T13:01:01,293 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741888_1071 2024-11-10T13:01:01,294 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK] 2024-11-10T13:01:01,296 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:01:01,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44514 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4]'}, localName='127.0.0.1:44447', datanodeUuid='07be666f-232a-4760-aea5-e1fb3b4f419c', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:01,296 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:01,296 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072 2024-11-10T13:01:01,296 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44514 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T13:01:01,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44514 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:44447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44514 dst: /127.0.0.1:44447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:01,297 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:01,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:01,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:01,301 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:01,301 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:01,301 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:01,301 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243661288 2024-11-10T13:01:01,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:01,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:01:01,302 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 2024-11-10T13:01:01,302 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38535:38535),(127.0.0.1/127.0.0.1:46285:46285)] 2024-11-10T13:01:01,302 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 is not closed yet, will try archiving it next time 2024-11-10T13:01:01,302 WARN [IPC Server handler 3 on default port 35903 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-10T13:01:01,302 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 after 0ms 2024-11-10T13:01:02,140 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:02,872 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:04,140 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:04,873 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:05,197 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4a8289f2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40725,null,null]) java.net.ConnectException: Call From 3857ccc89b65/172.17.0.2 to localhost:43009 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-10T13:01:05,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741833_1020 (size=455) 2024-11-10T13:01:05,303 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 after 4001ms 2024-11-10T13:01:05,844 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243631502 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44789%2C1731243631098.1731243631502 2024-11-10T13:01:05,845 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243650835 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44789%2C1731243631098.1731243650835 2024-11-10T13:01:06,140 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:06,873 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:07,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2aad76bd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741835_1011 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:07,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74bebcc1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741833_1020 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,141 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74bebcc1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741831_1007 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2aad76bd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741829_1005 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,495 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.1731243668495 2024-11-10T13:01:08,499 WARN [Thread-1021 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44546 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4]'}, localName='127.0.0.1:44447', datanodeUuid='07be666f-232a-4760-aea5-e1fb3b4f419c', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:01:08,499 WARN [Thread-1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:08,499 WARN [Thread-1021 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075 2024-11-10T13:01:08,499 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44546 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T13:01:08,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1469845502_22 at /127.0.0.1:44546 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:44447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44546 dst: /127.0.0.1:44447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:01:08,500 WARN [Thread-1021 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:08,504 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,504 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,504 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,504 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,505 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,505 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243652854 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243668495 2024-11-10T13:01:08,505 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46285:46285),(127.0.0.1/127.0.0.1:38535:38535)] 2024-11-10T13:01:08,506 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243652854 is not closed yet, will try archiving it next time 2024-11-10T13:01:08,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741876_1059 (size=12911) 2024-11-10T13:01:08,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:01:08,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-10T13:01:08,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/68b044edeb094f90ba0511e8a28bc513 is 1080, key is row0013/info:/1731243668507/Put/seqid=0 2024-11-10T13:01:08,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741893_1077 (size=8190) 2024-11-10T13:01:08,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741893_1077 (size=8190) 2024-11-10T13:01:08,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/68b044edeb094f90ba0511e8a28bc513 2024-11-10T13:01:08,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/68b044edeb094f90ba0511e8a28bc513 as 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513 2024-11-10T13:01:08,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513, entries=3, sequenceid=66, filesize=8.0 K 2024-11-10T13:01:08,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 420a6a06ae441e38de6228ffc018d528 in 26ms, sequenceid=66, compaction requested=false 2024-11-10T13:01:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:01:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-10T13:01:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:01:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf because midkey is the same as first or last row 2024-11-10T13:01:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44789 {}] regionserver.HRegion(8855): Flush requested on 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:01:08,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 420a6a06ae441e38de6228ffc018d528 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-10T13:01:08,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/775b2490aa7a489685292e58c43d9cbd is 1080, key is row0015/info:/1731243668512/Put/seqid=0 2024-11-10T13:01:08,742 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34508 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8]'}, localName='127.0.0.1:34495', datanodeUuid='a1f556c6-aae3-4ee9-826c-02aac278b22e', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,742 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,742 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34508 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:01:08,742 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34495,DS-6280841e-9118-428f-9e2a-14ca17e63f55,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:08,742 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078 2024-11-10T13:01:08,742 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:34508 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:34495:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34508 dst: /127.0.0.1:34495 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,743 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:08,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741895_1079 (size=14660) 2024-11-10T13:01:08,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741895_1079 (size=14660) 2024-11-10T13:01:08,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/775b2490aa7a489685292e58c43d9cbd 2024-11-10T13:01:08,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/775b2490aa7a489685292e58c43d9cbd as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd 2024-11-10T13:01:08,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd, entries=9, sequenceid=79, filesize=14.3 K 2024-11-10T13:01:08,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 420a6a06ae441e38de6228ffc018d528 in 26ms, sequenceid=79, compaction requested=true 2024-11-10T13:01:08,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:01:08,760 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-10T13:01:08,760 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:01:08,760 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf because midkey is the same as first or last row 2024-11-10T13:01:08,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 420a6a06ae441e38de6228ffc018d528:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:01:08,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-10T13:01:08,761 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:01:08,762 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:01:08,762 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1541): 420a6a06ae441e38de6228ffc018d528/info is initiating minor compaction (all files) 2024-11-10T13:01:08,762 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 420a6a06ae441e38de6228ffc018d528/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:01:08,762 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd] into tmpdir=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp, totalSize=40.0 K 2024-11-10T13:01:08,762 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f9e0d8d74dc438bbc30b2205e6686bf, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731243644833 2024-11-10T13:01:08,763 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 68b044edeb094f90ba0511e8a28bc513, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731243654744 2024-11-10T13:01:08,763 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] compactions.Compactor(225): Compacting 775b2490aa7a489685292e58c43d9cbd, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731243668512 2024-11-10T13:01:08,774 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 420a6a06ae441e38de6228ffc018d528#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:01:08,775 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/ebdc5aefd6564b58beecba9420b4d82f is 1080, key is row0002/info:/1731243644833/Put/seqid=0 2024-11-10T13:01:08,776 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,776 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:08,776 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741896_1080 2024-11-10T13:01:08,777 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741897_1081 (size=28989) 2024-11-10T13:01:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741897_1081 (size=28989) 2024-11-10T13:01:08,787 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/.tmp/info/ebdc5aefd6564b58beecba9420b4d82f as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/ebdc5aefd6564b58beecba9420b4d82f 2024-11-10T13:01:08,793 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 420a6a06ae441e38de6228ffc018d528/info of 420a6a06ae441e38de6228ffc018d528 into ebdc5aefd6564b58beecba9420b4d82f(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T13:01:08,793 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 420a6a06ae441e38de6228ffc018d528: 2024-11-10T13:01:08,793 INFO [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528., storeName=420a6a06ae441e38de6228ffc018d528/info, priority=13, startTime=1731243668760; duration=0sec 2024-11-10T13:01:08,793 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-10T13:01:08,793 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/ebdc5aefd6564b58beecba9420b4d82f because midkey is the same as first or last row 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/ebdc5aefd6564b58beecba9420b4d82f because midkey is the same as first or last row 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/ebdc5aefd6564b58beecba9420b4d82f because midkey is the same as first or last row 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:01:08,794 DEBUG [RS:0;3857ccc89b65:44789-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 420a6a06ae441e38de6228ffc018d528:info 2024-11-10T13:01:08,873 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,873 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-10T13:01:08,907 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.1731243652854 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44789%2C1731243631098.1731243652854 2024-11-10T13:01:08,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:01:08,935 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:01:08,935 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at 
org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:08,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:08,936 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:08,936 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:01:08,936 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:01:08,936 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1218406482, stopped=false 2024-11-10T13:01:08,936 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,44143,1731243631050 2024-11-10T13:01:08,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:08,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:08,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:08,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:08,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:08,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:08,938 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:01:08,938 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-10T13:01:08,938 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:08,938 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:08,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:08,938 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:08,938 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:08,939 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,44789,1731243631098' ***** 2024-11-10T13:01:08,939 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:01:08,939 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,38535,1731243632054' ***** 2024-11-10T13:01:08,939 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:01:08,939 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:01:08,939 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:01:08,939 INFO [RS:0;3857ccc89b65:44789 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:01:08,939 INFO [RS:0;3857ccc89b65:44789 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:01:08,939 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(3091): Received CLOSE for 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:01:08,939 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,38535,1731243632054 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:01:08,939 INFO [RS:1;3857ccc89b65:38535 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3857ccc89b65:38535. 
2024-11-10T13:01:08,940 DEBUG [RS:1;3857ccc89b65:38535 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:08,940 DEBUG [RS:1;3857ccc89b65:38535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,44789,1731243631098 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:01:08,940 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,38535,1731243632054; all regions closed. 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:44789. 
2024-11-10T13:01:08,940 DEBUG [RS:0;3857ccc89b65:44789 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:08,940 DEBUG [RS:0;3857ccc89b65:44789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:01:08,940 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 420a6a06ae441e38de6228ffc018d528, disabling compactions & flushes 2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:01:08,940 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:01:08,940 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:01:08,940 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. after waiting 0 ms 2024-11-10T13:01:08,940 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 
2024-11-10T13:01:08,940 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T13:01:08,940 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,940 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 420a6a06ae441e38de6228ffc018d528=TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.} 2024-11-10T13:01:08,940 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,940 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:01:08,940 DEBUG [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 420a6a06ae441e38de6228ffc018d528 2024-11-10T13:01:08,940 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:01:08,941 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:01:08,941 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:01:08,941 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:01:08,941 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-10T13:01:08,941 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238, 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd] to archive 2024-11-10T13:01:08,941 ERROR [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de-prefix:3857ccc89b65,44789,1731243631098.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,941 WARN [FSHLog-0-hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de-prefix:3857ccc89b65,44789,1731243631098.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,941 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C44789%2C1731243631098.meta:.meta(num 1731243631901) roll requested 2024-11-10T13:01:08,941 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,942 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44789%2C1731243631098.meta.1731243668941.meta 2024-11-10T13:01:08,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,942 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,942 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,942 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T13:01:08,942 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 2024-11-10T13:01:08,943 WARN [IPC Server handler 3 on default port 35903 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741837_1013 2024-11-10T13:01:08,943 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 after 1ms 2024-11-10T13:01:08,944 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/376868b067c14a7798ad69e9fc5094fc 2024-11-10T13:01:08,945 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,945 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44618 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4]'}, localName='127.0.0.1:44447', datanodeUuid='07be666f-232a-4760-aea5-e1fb3b4f419c', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,946 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:08,946 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083 2024-11-10T13:01:08,946 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44618 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-10T13:01:08,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44618 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:44447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44618 dst: /127.0.0.1:44447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:08,946 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/07e85aef0cc7405598c4a2dbcf5522b8 2024-11-10T13:01:08,946 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:08,947 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/56750e6709c448c180f63ea6df91676a 2024-11-10T13:01:08,949 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/05d0109d1e78435c93eec29e57f1dd78 2024-11-10T13:01:08,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,950 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,950 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,951 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/751690d8d228490c83c7e0abfbe81e08 2024-11-10T13:01:08,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:08,951 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243668941.meta 2024-11-10T13:01:08,952 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/9f9e0d8d74dc438bbc30b2205e6686bf 2024-11-10T13:01:08,953 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/340053af235f4b7bb4f408fe10459238 2024-11-10T13:01:08,954 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/68b044edeb094f90ba0511e8a28bc513 2024-11-10T13:01:08,956 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/info/775b2490aa7a489685292e58c43d9cbd 2024-11-10T13:01:08,956 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3857ccc89b65:44143 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-10T13:01:08,956 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [376868b067c14a7798ad69e9fc5094fc=10347, 07e85aef0cc7405598c4a2dbcf5522b8=12506, 56750e6709c448c180f63ea6df91676a=17994, 05d0109d1e78435c93eec29e57f1dd78=6027, 751690d8d228490c83c7e0abfbe81e08=6027, 9f9e0d8d74dc438bbc30b2205e6686bf=18097, 340053af235f4b7bb4f408fe10459238=6027, 68b044edeb094f90ba0511e8a28bc513=8190, 775b2490aa7a489685292e58c43d9cbd=14660] 2024-11-10T13:01:08,957 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,957 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40725,DS-4fde2194-6593-44d9-9bb3-4066b71ca62b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:08,957 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta 2024-11-10T13:01:08,958 WARN [IPC Server handler 2 on default port 35903 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1085 for block blk_1073741834_1010 2024-11-10T13:01:08,958 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta after 1ms 2024-11-10T13:01:08,964 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38535:38535),(127.0.0.1/127.0.0.1:46285:46285)] 2024-11-10T13:01:08,964 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/default/TestLogRolling-testLogRollOnDatanodeDeath/420a6a06ae441e38de6228ffc018d528/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-10T13:01:08,964 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta is not closed yet, will try archiving it next time 2024-11-10T13:01:08,965 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 2024-11-10T13:01:08,965 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 420a6a06ae441e38de6228ffc018d528: Waiting for close lock at 1731243668940Running coprocessor pre-close hooks at 1731243668940Disabling compacts and flushes for region at 1731243668940Disabling writes for close at 1731243668940Writing region close event to WAL at 1731243668957 (+17 ms)Running coprocessor post-close hooks at 1731243668965 (+8 ms)Closed at 1731243668965 2024-11-10T13:01:08,965 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528. 
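The "Archived from FileableStoreFile ... to ... /archive/..." records above show the store closer relocating compacted HFiles from the table's data directory to the mirrored path under archive/ while the region closes. Below is a minimal sketch of that relocation, assuming hypothetical paths and using only the public Hadoop FileSystem API; it is an illustration of the pattern, not the HBase HFileArchiver implementation.

// Sketch only: mirror a store file's relative path under an archive root and move it.
// dataRoot, archiveRoot and storeFile are hypothetical placeholders.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());

    Path dataRoot = new Path("/hbase/data");        // hypothetical data root
    Path archiveRoot = new Path("/hbase/archive");  // hypothetical archive root
    Path storeFile = new Path(dataRoot, "default/SomeTable/region/info/hfile1"); // hypothetical

    // Rebuild the same relative layout under the archive root.
    String relative = storeFile.toUri().getPath()
        .substring(dataRoot.toUri().getPath().length() + 1);
    Path target = new Path(archiveRoot, relative);

    fs.mkdirs(target.getParent());        // make sure the archive directory exists
    if (!fs.rename(storeFile, target)) {  // move within the same filesystem, no copy
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}

Because both roots live in the same HDFS namespace, the move is a metadata-only rename rather than a data copy, which matches the log showing files leaving data/ and appearing under archive/ with no transfer traffic.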
2024-11-10T13:01:08,983 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/info/86cc2d49744241d59facad92d8b4c553 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731243632170.420a6a06ae441e38de6228ffc018d528./info:regioninfo/1731243632535/Put/seqid=0 2024-11-10T13:01:08,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741900_1086 (size=7089) 2024-11-10T13:01:08,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741900_1086 (size=7089) 2024-11-10T13:01:08,988 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/info/86cc2d49744241d59facad92d8b4c553 2024-11-10T13:01:09,009 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/ns/ca84faba58e5493aa9574aee39080865 is 43, key is default/ns:d/1731243631948/Put/seqid=0 2024-11-10T13:01:09,012 WARN [Thread-1064 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33513 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:09,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44652 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4]'}, localName='127.0.0.1:44447', datanodeUuid='07be666f-232a-4760-aea5-e1fb3b4f419c', xmitsInProgress=0}:Exception transferring block BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087 to mirror 127.0.0.1:33513 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:09,012 WARN [Thread-1064 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK], DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 2024-11-10T13:01:09,012 WARN [Thread-1064 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087 2024-11-10T13:01:09,012 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44652 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-10T13:01:09,012 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1012691277_22 at /127.0.0.1:44652 [Receiving block BP-925157896-172.17.0.2-1731243630326:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:44447:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44652 dst: /127.0.0.1:44447 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:01:09,013 WARN [Thread-1064 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:09,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741902_1088 (size=5153) 2024-11-10T13:01:09,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741902_1088 (size=5153) 2024-11-10T13:01:09,018 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/ns/ca84faba58e5493aa9574aee39080865 2024-11-10T13:01:09,048 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/table/970516b74bb84bdba2ff777044c1cc43 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731243632546/Put/seqid=0 2024-11-10T13:01:09,050 WARN [Thread-1071 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:09,051 WARN [Thread-1071 {}] hdfs.DataStreamer(1731): Error Recovery for BP-925157896-172.17.0.2-1731243630326:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK], DatanodeInfoWithStorage[127.0.0.1:44447,DS-7d81c0d1-69f3-4b0a-a084-b628cd80149c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK]) is bad. 
2024-11-10T13:01:09,051 WARN [Thread-1071 {}] hdfs.DataStreamer(1850): Abandoning BP-925157896-172.17.0.2-1731243630326:blk_1073741903_1089 2024-11-10T13:01:09,051 WARN [Thread-1071 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33513,DS-b5f80209-4e09-49c3-8a4c-611ce85f484d,DISK] 2024-11-10T13:01:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741904_1090 (size=5424) 2024-11-10T13:01:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741904_1090 (size=5424) 2024-11-10T13:01:09,057 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/table/970516b74bb84bdba2ff777044c1cc43 2024-11-10T13:01:09,063 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/info/86cc2d49744241d59facad92d8b4c553 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/info/86cc2d49744241d59facad92d8b4c553 2024-11-10T13:01:09,069 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/info/86cc2d49744241d59facad92d8b4c553, entries=10, sequenceid=11, filesize=6.9 K 2024-11-10T13:01:09,070 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/ns/ca84faba58e5493aa9574aee39080865 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/ns/ca84faba58e5493aa9574aee39080865 2024-11-10T13:01:09,075 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/ns/ca84faba58e5493aa9574aee39080865, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:01:09,076 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/.tmp/table/970516b74bb84bdba2ff777044c1cc43 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/table/970516b74bb84bdba2ff777044c1cc43 2024-11-10T13:01:09,080 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/table/970516b74bb84bdba2ff777044c1cc43, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T13:01:09,082 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false 2024-11-10T13:01:09,086 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:01:09,087 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:01:09,087 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:09,087 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243668940Running coprocessor pre-close hooks at 1731243668940Disabling compacts and flushes for region at 1731243668940Disabling writes for close at 1731243668941 (+1 ms)Obtaining lock to block concurrent updates at 1731243668941Preparing flush snapshotting stores in 1588230740 at 1731243668941Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731243668941Flushing stores of hbase:meta,,1.1588230740 at 1731243668965 (+24 ms)Flushing 1588230740/info: creating writer at 1731243668965Flushing 1588230740/info: appending metadata at 1731243668982 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731243668982Flushing 1588230740/ns: creating writer at 1731243668994 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731243669008 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731243669008Flushing 1588230740/table: creating writer at 1731243669025 (+17 ms)Flushing 1588230740/table: appending metadata at 1731243669048 (+23 ms)Flushing 1588230740/table: closing flushed file at 1731243669048Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7698fda3: reopening flushed file at 1731243669062 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385a1c08: reopening flushed file at 1731243669069 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@535eb791: reopening flushed file at 1731243669075 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false at 1731243669082 (+7 ms)Writing region close event to WAL at 1731243669083 (+1 ms)Running coprocessor post-close hooks at 1731243669087 (+4 ms)Closed at 1731243669087 2024-11-10T13:01:09,087 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:09,141 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,44789,1731243631098; all regions closed. 
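The meta flush above follows a write-to-temporary-then-commit pattern: each flushed HFile is first written under the region's .tmp directory and then moved into the column-family directory ("Committing ... /.tmp/info/... as ... /info/..."), so readers never observe a half-written store file. The following is a minimal sketch of that pattern with hypothetical file names, using plain Hadoop FileSystem calls rather than the HRegionFileSystem code itself.

// Sketch only: write the new file under .tmp, then commit it with a rename.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());

    Path regionDir = new Path("/hbase/data/hbase/meta/1588230740"); // region dir as in the log
    Path tmpFile = new Path(regionDir, ".tmp/info/flush-example");  // hypothetical file name
    Path committed = new Path(regionDir, "info/flush-example");

    // 1. Write the flushed data to the temporary location and close it.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here\n"); // placeholder payload
    }

    // 2. Commit by moving the finished file into the store directory.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Commit failed for " + tmpFile);
    }
  }
}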
2024-11-10T13:01:09,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:09,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:09,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:09,142 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:09,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:09,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741899_1084 (size=825) 2024-11-10T13:01:09,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741899_1084 (size=825) 2024-11-10T13:01:09,156 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:01:09,156 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:01:09,366 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:09,405 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:01:09,405 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:01:09,616 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@9f37e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34495, datanodeUuid=a1f556c6-aae3-4ee9-826c-02aac278b22e, infoPort=46285, infoSecurePort=0, ipcPort=42993, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741876_1059 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:10,138 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:10,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2aad76bd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741836_1012 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:10,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@74bebcc1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741832_1008 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:11,179 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2aad76bd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44447, datanodeUuid=07be666f-232a-4760-aea5-e1fb3b4f419c, infoPort=38535, infoSecurePort=0, ipcPort=39161, storageInfo=lv=-57;cid=testClusterID;nsid=989579585;c=1731243630326):Failed to transfer BP-925157896-172.17.0.2-1731243630326:blk_1073741826_1002 to 127.0.0.1:33513 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
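The "Failed to recover lease, attempt=0 on file=..." record earlier, and the attempt=1 records that follow roughly four seconds later, come from a retry loop that asks the NameNode to recover the WAL's lease and then polls until the file is reported closed. Below is a rough sketch of such a loop; recoverLease and isFileClosed are the real DistributedFileSystem methods (isFileClosed also appears in the stack traces further down), while the timeout, backoff, and URI values are arbitrary and the class is purely illustrative, not RecoverLeaseFSUtils.

// Sketch only: request lease recovery, then poll isFileClosed with a fixed backoff.
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000; // arbitrary one-minute budget
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease returns true once the file is closed or recovery completed.
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt++ + " on file=" + wal);
      Thread.sleep(4_000); // wait before retrying, roughly matching the gaps seen in the log
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        DistributedFileSystem.get(URI.create("hdfs://localhost:35903"), conf);
    recoverLease(dfs, new Path(args[0])); // pass the WAL path to recover
  }
}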
2024-11-10T13:01:11,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:01:11,963 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-10T13:01:11,963 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-10T13:01:12,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-10T13:01:12,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:01:12,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T13:01:12,944 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 after 4002ms 2024-11-10T13:01:12,959 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta after 4001ms 2024-11-10T13:01:13,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:01:13,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:01:13,942 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-10T13:01:13,944 DEBUG [RS:1;3857ccc89b65:38535 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C38535%2C1731243632054:(num 1731243632271) 2024-11-10T13:01:13,945 DEBUG [RS:1;3857ccc89b65:38535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.CompactSplit(469): 
Waiting for Split Thread to finish... 2024-11-10T13:01:13,945 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:01:13,945 INFO [RS:1;3857ccc89b65:38535 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38535 2024-11-10T13:01:13,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,38535,1731243632054 2024-11-10T13:01:13,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:01:13,947 INFO [RS:1;3857ccc89b65:38535 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:01:13,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,38535,1731243632054] 2024-11-10T13:01:13,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:13,950 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,38535,1731243632054 already deleted, retry=false 2024-11-10T13:01:13,951 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,38535,1731243632054 expired; onlineServers=1 2024-11-10T13:01:13,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:13,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,048 INFO [RS:1;3857ccc89b65:38535 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:01:14,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:14,049 INFO [RS:1;3857ccc89b65:38535 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,38535,1731243632054; zookeeper connection closed. 2024-11-10T13:01:14,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38535-0x10101f74c490002, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:14,049 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c44c8c7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c44c8c7 2024-11-10T13:01:14,142 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-10T13:01:14,145 DEBUG [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs 2024-11-10T13:01:14,145 INFO [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C44789%2C1731243631098.meta:.meta(num 1731243668941) 2024-11-10T13:01:14,146 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,146 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,146 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,146 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741892_1076 (size=16308) 2024-11-10T13:01:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741892_1076 (size=16308) 2024-11-10T13:01:14,151 DEBUG [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C44789%2C1731243631098:(num 1731243668495) 2024-11-10T13:01:14,151 DEBUG [RS:0;3857ccc89b65:44789 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:01:14,151 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:01:14,151 INFO [RS:0;3857ccc89b65:44789 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44789 2024-11-10T13:01:14,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,44789,1731243631098 2024-11-10T13:01:14,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:01:14,153 INFO [RS:0;3857ccc89b65:44789 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:01:14,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,44789,1731243631098] 2024-11-10T13:01:14,155 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,44789,1731243631098 already deleted, retry=false 2024-11-10T13:01:14,155 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,44789,1731243631098 expired; onlineServers=0 2024-11-10T13:01:14,156 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,44143,1731243631050' ***** 2024-11-10T13:01:14,156 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:01:14,156 DEBUG [M:0;3857ccc89b65:44143 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:01:14,156 DEBUG [M:0;3857ccc89b65:44143 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:01:14,156 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
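
The ERROR from the WAL-Shutdown-0 thread at 13:01:14,142 above reports that the async WAL writer did not finish closing within the 5-second wait and points at the configuration key "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal Java sketch of raising that key in a Hadoop Configuration follows; the class name, the 30-second value, and the use of HBaseConfiguration.create() are illustrative assumptions for this note, not something taken from the test run itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
        public static void main(String[] args) {
            // Load hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();
            // Allow the async WAL writer more than the 5 seconds the log shows expiring.
            // 30 is an arbitrary illustrative value.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
            System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", -1));
        }
    }

The same key could equally be set once in hbase-site.xml; setting it programmatically is just the shortest self-contained way to show it.
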
2024-11-10T13:01:14,156 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243631288 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243631288,5,FailOnTimeoutGroup] 2024-11-10T13:01:14,156 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243631288 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243631288,5,FailOnTimeoutGroup] 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:01:14,156 DEBUG [M:0;3857ccc89b65:44143 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:01:14,156 INFO [M:0;3857ccc89b65:44143 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:01:14,157 INFO [M:0;3857ccc89b65:44143 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:01:14,157 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:01:14,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:01:14,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:14,159 DEBUG [M:0;3857ccc89b65:44143 {}] zookeeper.ZKUtil(347): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:01:14,159 WARN [M:0;3857ccc89b65:44143 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:01:14,159 INFO [M:0;3857ccc89b65:44143 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/.lastflushedseqids 2024-11-10T13:01:14,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741905_1091 (size=130) 2024-11-10T13:01:14,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741905_1091 (size=130) 2024-11-10T13:01:14,165 INFO [M:0;3857ccc89b65:44143 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:01:14,165 INFO [M:0;3857ccc89b65:44143 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:01:14,165 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:01:14,165 INFO [M:0;3857ccc89b65:44143 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:14,165 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:14,165 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:01:14,165 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:14,165 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-10T13:01:14,181 DEBUG [M:0;3857ccc89b65:44143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8228c80b46d4c958db76989d28bcafd is 82, key is hbase:meta,,1/info:regioninfo/1731243631929/Put/seqid=0 2024-11-10T13:01:14,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741906_1092 (size=5672) 2024-11-10T13:01:14,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741906_1092 (size=5672) 2024-11-10T13:01:14,186 INFO [M:0;3857ccc89b65:44143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8228c80b46d4c958db76989d28bcafd 2024-11-10T13:01:14,206 DEBUG [M:0;3857ccc89b65:44143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/567891e9a7cf4954b0094bd4ddb30081 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731243632551/Put/seqid=0 2024-11-10T13:01:14,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741907_1093 (size=6255) 2024-11-10T13:01:14,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741907_1093 (size=6255) 2024-11-10T13:01:14,211 INFO [M:0;3857ccc89b65:44143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/567891e9a7cf4954b0094bd4ddb30081 2024-11-10T13:01:14,216 INFO [M:0;3857ccc89b65:44143 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 567891e9a7cf4954b0094bd4ddb30081 2024-11-10T13:01:14,237 DEBUG [M:0;3857ccc89b65:44143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/697799ce17754c088f5b67707b0ff6a1 is 69, key is 3857ccc89b65,38535,1731243632054/rs:state/1731243632119/Put/seqid=0 2024-11-10T13:01:14,241 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741908_1094 (size=5224) 2024-11-10T13:01:14,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741908_1094 (size=5224) 2024-11-10T13:01:14,242 INFO [M:0;3857ccc89b65:44143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/697799ce17754c088f5b67707b0ff6a1 2024-11-10T13:01:14,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:14,254 INFO [RS:0;3857ccc89b65:44789 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:01:14,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44789-0x10101f74c490001, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:14,254 INFO [RS:0;3857ccc89b65:44789 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,44789,1731243631098; zookeeper connection closed. 2024-11-10T13:01:14,255 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3897d629 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3897d629 2024-11-10T13:01:14,255 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-10T13:01:14,260 DEBUG [M:0;3857ccc89b65:44143 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e95f00f88d184db9aab542000de3fde4 is 52, key is load_balancer_on/state:d/1731243632029/Put/seqid=0 2024-11-10T13:01:14,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741909_1095 (size=5056) 2024-11-10T13:01:14,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741909_1095 (size=5056) 2024-11-10T13:01:14,265 INFO [M:0;3857ccc89b65:44143 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e95f00f88d184db9aab542000de3fde4 2024-11-10T13:01:14,270 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8228c80b46d4c958db76989d28bcafd as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8228c80b46d4c958db76989d28bcafd 2024-11-10T13:01:14,274 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8228c80b46d4c958db76989d28bcafd, entries=8, sequenceid=60, filesize=5.5 K 2024-11-10T13:01:14,275 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/567891e9a7cf4954b0094bd4ddb30081 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/567891e9a7cf4954b0094bd4ddb30081 2024-11-10T13:01:14,279 INFO [M:0;3857ccc89b65:44143 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 567891e9a7cf4954b0094bd4ddb30081 2024-11-10T13:01:14,280 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/567891e9a7cf4954b0094bd4ddb30081, entries=6, sequenceid=60, filesize=6.1 K 2024-11-10T13:01:14,280 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/697799ce17754c088f5b67707b0ff6a1 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/697799ce17754c088f5b67707b0ff6a1 2024-11-10T13:01:14,285 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/697799ce17754c088f5b67707b0ff6a1, entries=2, sequenceid=60, filesize=5.1 K 2024-11-10T13:01:14,286 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e95f00f88d184db9aab542000de3fde4 as hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e95f00f88d184db9aab542000de3fde4 2024-11-10T13:01:14,290 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e95f00f88d184db9aab542000de3fde4, entries=1, sequenceid=60, filesize=4.9 K 2024-11-10T13:01:14,291 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=60, compaction requested=false 2024-11-10T13:01:14,293 INFO [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
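
As a quick consistency check on the flush above: the per-family flushes report 504 B (info), 22.58 KB (proc), 130 B (rs) and 48 B (state), which, reading KB as KiB, sum to roughly 504 + 23122 + 130 + 48 ≈ 23804 B and match the final "dataSize ~23.25 KB/23805" line (23805 / 1024 ≈ 23.25); the heap figure checks out the same way (30136 / 1024 ≈ 29.43 KB). The committed HFile sizes also agree with the block sizes reported by the Block report processor, e.g. the info file of 5.5 K versus blk_1073741906 at size=5672 (5672 / 1024 ≈ 5.54). Each family is first flushed to a .tmp path and then committed to its final store directory, which is the rename sequence visible in the Committing/Added pairs above.
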
2024-11-10T13:01:14,293 DEBUG [M:0;3857ccc89b65:44143 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243674165Disabling compacts and flushes for region at 1731243674165Disabling writes for close at 1731243674165Obtaining lock to block concurrent updates at 1731243674165Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243674165Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731243674166 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731243674166Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243674166Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243674180 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243674180Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243674191 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243674205 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243674205Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243674217 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243674237 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243674237Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243674246 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243674260 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243674260Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f02792a: reopening flushed file at 1731243674269 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@510e9bf4: reopening flushed file at 1731243674274 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bc90bea: reopening flushed file at 1731243674280 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36b2f307: reopening flushed file at 1731243674285 (+5 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=60, compaction requested=false at 1731243674292 (+7 ms)Writing region close event to WAL at 1731243674293 (+1 ms)Closed at 1731243674293 2024-11-10T13:01:14,294 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,294 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,294 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,294 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,294 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:14,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34495 is added to blk_1073741890_1073 (size=1045) 2024-11-10T13:01:14,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44447 is added to blk_1073741890_1073 (size=1045) 2024-11-10T13:01:14,498 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:01:14,513 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:14,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:14,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:15,201 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@cd4cd79 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-925157896-172.17.0.2-1731243630326:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40725,null,null]) java.net.ConnectException: Call From 3857ccc89b65/172.17.0.2 to localhost:43009 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-10T13:01:15,310 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/WALs/3857ccc89b65,44143,1731243631050/3857ccc89b65%2C44143%2C1731243631050.1731243631199 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/oldWALs/3857ccc89b65%2C44143%2C1731243631050.1731243631199 2024-11-10T13:01:15,313 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/MasterData/oldWALs/3857ccc89b65%2C44143%2C1731243631050.1731243631199 to hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/oldWALs/3857ccc89b65%2C44143%2C1731243631050.1731243631199$masterlocalwal$ 2024-11-10T13:01:15,314 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:01:15,314 INFO [M:0;3857ccc89b65:44143 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:01:15,314 INFO [M:0;3857ccc89b65:44143 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44143 2024-11-10T13:01:15,314 INFO [M:0;3857ccc89b65:44143 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:01:15,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:15,416 INFO [M:0;3857ccc89b65:44143 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:01:15,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44143-0x10101f74c490000, quorum=127.0.0.1:56510, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:15,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c79190f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:15,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:15,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:15,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:15,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:15,420 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:15,420 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid 07be666f-232a-4760-aea5-e1fb3b4f419c) service to localhost/127.0.0.1:35903 
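
The Close-WAL-Writer-0 warnings above show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through reflection, so every failure comes back wrapped in a java.lang.reflect.InvocationTargetException; the first probe failed because the WAL file no longer exists on the NameNode, the later ones because the DFSClient had already been closed ("Filesystem closed"). A minimal sketch of that probe-and-unwrap pattern is below. It is only the shape the stack traces imply, not the actual HBase implementation; the class name IsFileClosedProbe and the choice to return false on every failure are assumptions made for the example.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
        /** Reflectively call isFileClosed(Path) when the FileSystem implementation offers it. */
        static boolean isFileClosed(FileSystem fs, Path path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException e) {
                return false; // not an HDFS-style FileSystem, nothing to probe
            } catch (IllegalAccessException e) {
                return false;
            } catch (InvocationTargetException e) {
                // The real cause (FileNotFoundException, "Filesystem closed", ...) is nested here,
                // which is why the warnings above log InvocationTargetException first.
                return false;
            }
        }
    }
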
2024-11-10T13:01:15,420 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:01:15,421 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:15,420 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b8c994 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40725,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:43009 , LocalHost:localPort 3857ccc89b65/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-10T13:01:15,421 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b8c994 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-925157896-172.17.0.2-1731243630326:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44447,null,null], DatanodeInfoWithStorage[127.0.0.1:40725,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-925157896-172.17.0.2-1731243630326 2024-11-10T13:01:15,421 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b8c994 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44447,null,null]) java.io.IOException: No block pool offer service for bpid=BP-925157896-172.17.0.2-1731243630326 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
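
The InterruptedIOException above prints the IPC client's retry policy, RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS): the inter-datanode recovery call keeps retrying the refused connection to localhost:43009 on a fixed one-second sleep until the shutdown interrupt lands mid-sleep. For reference, an equivalent policy can be built with Hadoop's RetryPolicies factory; the class name below and the act of printing the policy are illustrative only, and the numbers are simply the ones from the message above.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    final class RetryPolicyExample {
        public static void main(String[] args) {
            // Same shape as the policy named in the warning: up to 10 retries,
            // fixed 1000 ms sleep between attempts.
            RetryPolicy policy =
                RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
            System.out.println(policy);
        }
    }
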
2024-11-10T13:01:15,421 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b8c994 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40725,null,null]) java.io.IOException: No block pool offer service for bpid=BP-925157896-172.17.0.2-1731243630326 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:15,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data3/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:15,421 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@b8c994 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44447,null,null], DatanodeInfoWithStorage[127.0.0.1:40725,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-925157896-172.17.0.2-1731243630326:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:44447,null,null], DatanodeInfoWithStorage[127.0.0.1:40725,null,null]] 2024-11-10T13:01:15,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data4/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:15,422 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:15,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4438143d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:15,424 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:15,424 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:15,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:15,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:15,425 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:15,425 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:01:15,425 WARN [BP-925157896-172.17.0.2-1731243630326 heartbeating to localhost/127.0.0.1:35903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-925157896-172.17.0.2-1731243630326 (Datanode Uuid a1f556c6-aae3-4ee9-826c-02aac278b22e) service to localhost/127.0.0.1:35903 2024-11-10T13:01:15,425 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:15,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data7/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:15,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/cluster_ca3516ed-3462-34cb-04d5-c3857eb856b7/data/data8/current/BP-925157896-172.17.0.2-1731243630326 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:15,426 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:15,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c00ef51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:01:15,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73b9709e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:15,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:15,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42b52d44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:15,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:15,445 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:01:15,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:01:15,480 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 79) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35903 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f4578bf5378.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35903 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35903 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35903 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35903 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35903 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35903 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:35903 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35903 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f4578bf5378.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35903 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35903 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=34 (was 67), ProcessCount=11 (was 11), AvailableMemoryMB=8664 (was 9112) 2024-11-10T13:01:15,487 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=34, ProcessCount=11, AvailableMemoryMB=8665 2024-11-10T13:01:15,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:01:15,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.log.dir so I do NOT create it in target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460 2024-11-10T13:01:15,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/63be9bfb-18f2-3847-7cb6-502f2f411afa/hadoop.tmp.dir so I do NOT create it in target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a, deleteOnExit=true 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/test.cache.data in system properties and HBase conf 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:01:15,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:01:15,488 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:01:15,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:01:15,502 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:01:15,572 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:15,576 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:15,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:15,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:15,577 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:15,578 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:15,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:15,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:15,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53a4c428{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-40135-hadoop-hdfs-3_4_1-tests_jar-_-any-17514538707686183418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:01:15,694 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:40135} 2024-11-10T13:01:15,694 INFO [Time-limited test {}] server.Server(415): Started @148665ms 2024-11-10T13:01:15,707 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:01:15,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:15,779 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:15,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:15,780 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:15,780 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:01:15,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:15,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:15,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60017892{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-46341-hadoop-hdfs-3_4_1-tests_jar-_-any-6499298505682937546/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:15,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:46341} 2024-11-10T13:01:15,896 INFO [Time-limited test {}] server.Server(415): Started @148867ms 2024-11-10T13:01:15,897 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:01:15,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:15,930 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:15,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:15,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:15,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:15,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:15,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:15,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:15,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:15,997 WARN [Thread-1190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data1/current/BP-1340826382-172.17.0.2-1731243675520/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:15,997 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data2/current/BP-1340826382-172.17.0.2-1731243675520/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:16,016 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:01:16,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19d42152ef7f57b2 with lease ID 0x6bb04ef2f9fe43e7: Processing first storage report for DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1 from datanode DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36101, infoSecurePort=0, ipcPort=34051, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520) 2024-11-10T13:01:16,018 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19d42152ef7f57b2 with lease ID 0x6bb04ef2f9fe43e7: from storage DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1 node DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36101, infoSecurePort=0, ipcPort=34051, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:16,018 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19d42152ef7f57b2 with lease ID 0x6bb04ef2f9fe43e7: Processing first storage report for DS-b026d2bf-270d-4cb4-b3f8-f8c6d717bec3 from datanode DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36101, infoSecurePort=0, ipcPort=34051, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520) 2024-11-10T13:01:16,019 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19d42152ef7f57b2 with lease ID 0x6bb04ef2f9fe43e7: from storage DS-b026d2bf-270d-4cb4-b3f8-f8c6d717bec3 node DatanodeRegistration(127.0.0.1:37643, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36101, infoSecurePort=0, ipcPort=34051, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:16,055 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fab6db5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-45623-hadoop-hdfs-3_4_1-tests_jar-_-any-2332558350605943503/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:16,056 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:45623} 2024-11-10T13:01:16,056 INFO [Time-limited test {}] server.Server(415): Started @149027ms 2024-11-10T13:01:16,057 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T13:01:16,165 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data3/current/BP-1340826382-172.17.0.2-1731243675520/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:16,165 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data4/current/BP-1340826382-172.17.0.2-1731243675520/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:16,181 WARN [Thread-1205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:01:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61255b9c1dec411b with lease ID 0x6bb04ef2f9fe43e8: Processing first storage report for DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859 from datanode DatanodeRegistration(127.0.0.1:44651, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=37911, infoSecurePort=0, ipcPort=41221, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520) 2024-11-10T13:01:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x61255b9c1dec411b with lease ID 0x6bb04ef2f9fe43e8: from storage DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859 node DatanodeRegistration(127.0.0.1:44651, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=37911, infoSecurePort=0, ipcPort=41221, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61255b9c1dec411b with lease ID 0x6bb04ef2f9fe43e8: Processing first storage report for DS-1bf6b48c-5ee1-4e73-ab5d-436a592ebb52 from datanode DatanodeRegistration(127.0.0.1:44651, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=37911, infoSecurePort=0, ipcPort=41221, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520) 2024-11-10T13:01:16,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x61255b9c1dec411b with lease ID 0x6bb04ef2f9fe43e8: from storage DS-1bf6b48c-5ee1-4e73-ab5d-436a592ebb52 node DatanodeRegistration(127.0.0.1:44651, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=37911, infoSecurePort=0, ipcPort=41221, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:16,282 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460 2024-11-10T13:01:16,285 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/zookeeper_0, clientPort=56422, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:01:16,286 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56422 2024-11-10T13:01:16,286 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,287 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:01:16,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:01:16,299 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc with version=8 2024-11-10T13:01:16,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:01:16,301 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:01:16,301 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:01:16,302 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37415 2024-11-10T13:01:16,303 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37415 connecting to ZooKeeper ensemble=127.0.0.1:56422 2024-11-10T13:01:16,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374150x0, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:01:16,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37415-0x10101f7fd0c0000 connected 2024-11-10T13:01:16,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,328 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:16,328 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc, hbase.cluster.distributed=false 2024-11-10T13:01:16,330 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:01:16,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37415 2024-11-10T13:01:16,330 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37415 2024-11-10T13:01:16,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37415 2024-11-10T13:01:16,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37415 2024-11-10T13:01:16,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37415 2024-11-10T13:01:16,347 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:01:16,347 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:01:16,348 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:01:16,348 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41129 2024-11-10T13:01:16,349 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41129 connecting to ZooKeeper ensemble=127.0.0.1:56422 2024-11-10T13:01:16,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411290x0, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:01:16,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411290x0, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:16,357 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41129-0x10101f7fd0c0001 connected 2024-11-10T13:01:16,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:01:16,358 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:01:16,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:01:16,359 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:01:16,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41129 2024-11-10T13:01:16,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41129 2024-11-10T13:01:16,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41129 2024-11-10T13:01:16,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41129 2024-11-10T13:01:16,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41129 2024-11-10T13:01:16,372 
DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:37415 2024-11-10T13:01:16,372 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:16,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:16,376 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:01:16,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,377 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:01:16,378 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,37415,1731243676301 from backup master directory 2024-11-10T13:01:16,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:16,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:16,379 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T13:01:16,379 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,37415,1731243676301
2024-11-10T13:01:16,383 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/hbase.id] with ID: f0bd1452-dd8d-436d-adf7-6b85f833c50a
2024-11-10T13:01:16,383 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/.tmp/hbase.id
2024-11-10T13:01:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741826_1002 (size=42)
2024-11-10T13:01:16,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741826_1002 (size=42)
2024-11-10T13:01:16,389 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/.tmp/hbase.id]:[hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/hbase.id]
2024-11-10T13:01:16,400 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T13:01:16,400 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-10T13:01:16,401 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
2024-11-10T13:01:16,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:01:16,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:01:16,410 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:01:16,411 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:01:16,411 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:16,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:01:16,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:01:16,418 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store 2024-11-10T13:01:16,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:01:16,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:01:16,426 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:16,426 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:01:16,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243676426Disabling compacts and flushes for region at 1731243676426Disabling writes for close at 1731243676426Writing region close event to WAL at 1731243676426Closed at 1731243676426 2024-11-10T13:01:16,427 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/.initializing 2024-11-10T13:01:16,427 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,429 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37415%2C1731243676301, suffix=, logDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301, archiveDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/oldWALs, maxLogs=10 2024-11-10T13:01:16,430 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37415%2C1731243676301.1731243676429 2024-11-10T13:01:16,434 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 2024-11-10T13:01:16,436 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37911:37911),(127.0.0.1/127.0.0.1:36101:36101)] 2024-11-10T13:01:16,440 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:16,441 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:16,441 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,441 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,442 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,443 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:01:16,443 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,444 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:16,444 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:01:16,445 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:16,446 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:01:16,447 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:16,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,448 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:01:16,449 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:16,449 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,450 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,450 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,451 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,451 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,452 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:01:16,453 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:16,454 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:16,455 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706662, jitterRate=-0.10143378376960754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:01:16,455 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243676441Initializing all the Stores at 1731243676441Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243676442 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243676442Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243676442Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243676442Cleaning up temporary data from old regions at 1731243676451 (+9 ms)Region opened successfully at 1731243676455 (+4 ms) 2024-11-10T13:01:16,456 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:01:16,459 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ae26f3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:01:16,460 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:01:16,460 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:01:16,460 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:01:16,460 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:01:16,461 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:01:16,461 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:01:16,461 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:01:16,464 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:01:16,465 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:01:16,467 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:01:16,467 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:01:16,468 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:01:16,469 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:01:16,469 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:01:16,470 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:01:16,471 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:01:16,473 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:01:16,474 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:01:16,476 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:01:16,479 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:01:16,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:16,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:16,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,481 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,37415,1731243676301, sessionid=0x10101f7fd0c0000, setting cluster-up flag (Was=false) 2024-11-10T13:01:16,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,488 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:01:16,489 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,498 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:01:16,499 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,37415,1731243676301 2024-11-10T13:01:16,500 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:01:16,501 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:16,502 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:01:16,502 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T13:01:16,502 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,37415,1731243676301 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:01:16,503 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:16,503 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:16,503 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:16,503 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:16,504 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:01:16,504 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,504 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:01:16,504 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:01:16,505 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:16,505 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:01:16,506 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243706506 2024-11-10T13:01:16,506 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:01:16,506 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:01:16,506 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:01:16,506 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:01:16,507 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:01:16,507 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:01:16,508 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:01:16,508 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:01:16,508 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243676508,5,FailOnTimeoutGroup] 2024-11-10T13:01:16,512 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243676508,5,FailOnTimeoutGroup] 2024-11-10T13:01:16,512 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,512 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:01:16,513 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,513 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-10T13:01:16,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:01:16,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:01:16,521 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:01:16,521 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc 2024-11-10T13:01:16,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:01:16,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:01:16,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:16,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:01:16,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:01:16,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:16,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:01:16,536 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:01:16,536 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:16,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:01:16,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:01:16,538 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:16,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:01:16,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:01:16,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:16,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:16,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:01:16,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740 2024-11-10T13:01:16,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740 2024-11-10T13:01:16,542 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:01:16,542 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:01:16,543 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-10T13:01:16,544 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:01:16,546 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:16,547 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733532, jitterRate=-0.06726709008216858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243676532Initializing all the Stores at 1731243676532Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243676533 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243676533Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243676533Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243676533Cleaning up temporary data from old regions at 1731243676542 (+9 ms)Region opened successfully at 1731243676547 (+5 ms) 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:01:16,548 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:01:16,548 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:16,548 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243676548Disabling compacts and flushes for region at 1731243676548Disabling writes for close at 1731243676548Writing region close 
event to WAL at 1731243676548Closed at 1731243676548 2024-11-10T13:01:16,549 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:16,550 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:01:16,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:01:16,551 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:01:16,552 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:01:16,562 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(746): ClusterId : f0bd1452-dd8d-436d-adf7-6b85f833c50a 2024-11-10T13:01:16,562 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:01:16,566 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:01:16,566 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:01:16,567 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:01:16,568 DEBUG [RS:0;3857ccc89b65:41129 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fe8438, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:01:16,580 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:41129 2024-11-10T13:01:16,580 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:01:16,580 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:01:16,580 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T13:01:16,580 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,37415,1731243676301 with port=41129, startcode=1731243676346 2024-11-10T13:01:16,581 DEBUG [RS:0;3857ccc89b65:41129 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:01:16,583 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38435, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:01:16,583 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37415 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,583 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37415 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,585 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc 2024-11-10T13:01:16,585 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41197 2024-11-10T13:01:16,585 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:01:16,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:01:16,587 DEBUG [RS:0;3857ccc89b65:41129 {}] zookeeper.ZKUtil(111): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,587 WARN [RS:0;3857ccc89b65:41129 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:01:16,587 INFO [RS:0;3857ccc89b65:41129 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:16,588 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,588 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,41129,1731243676346] 2024-11-10T13:01:16,591 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:01:16,593 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:01:16,594 INFO [RS:0;3857ccc89b65:41129 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:01:16,594 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-10T13:01:16,594 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:01:16,595 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:01:16,595 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,595 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,595 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,595 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:01:16,596 DEBUG [RS:0;3857ccc89b65:41129 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,600 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,41129,1731243676346-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:01:16,624 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:01:16,624 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,41129,1731243676346-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,624 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,624 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.Replication(171): 3857ccc89b65,41129,1731243676346 started 2024-11-10T13:01:16,646 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:16,646 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,41129,1731243676346, RpcServer on 3857ccc89b65/172.17.0.2:41129, sessionid=0x10101f7fd0c0001 2024-11-10T13:01:16,646 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:01:16,646 DEBUG [RS:0;3857ccc89b65:41129 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,646 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,41129,1731243676346' 2024-11-10T13:01:16,647 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:01:16,647 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:01:16,648 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:01:16,648 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:01:16,648 DEBUG [RS:0;3857ccc89b65:41129 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,648 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,41129,1731243676346' 2024-11-10T13:01:16,648 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:01:16,648 DEBUG 
[RS:0;3857ccc89b65:41129 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:01:16,649 DEBUG [RS:0;3857ccc89b65:41129 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:01:16,649 INFO [RS:0;3857ccc89b65:41129 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:01:16,649 INFO [RS:0;3857ccc89b65:41129 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:01:16,702 WARN [3857ccc89b65:37415 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:01:16,751 INFO [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C41129%2C1731243676346, suffix=, logDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346, archiveDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs, maxLogs=32 2024-11-10T13:01:16,752 INFO [RS:0;3857ccc89b65:41129 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.1731243676752 2024-11-10T13:01:16,757 INFO [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 2024-11-10T13:01:16,758 DEBUG [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37911:37911),(127.0.0.1/127.0.0.1:36101:36101)] 2024-11-10T13:01:16,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:16,953 DEBUG [3857ccc89b65:37415 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:01:16,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:16,955 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,41129,1731243676346, state=OPENING 2024-11-10T13:01:16,956 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:01:16,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:16,959 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:01:16,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:16,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:16,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,41129,1731243676346}] 2024-11-10T13:01:16,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:17,112 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:01:17,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59113, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:01:17,118 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:01:17,118 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:17,120 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C41129%2C1731243676346.meta, suffix=.meta, logDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346, archiveDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs, maxLogs=32 2024-11-10T13:01:17,121 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta 2024-11-10T13:01:17,126 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta 2024-11-10T13:01:17,131 DEBUG 
[RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37911:37911),(127.0.0.1/127.0.0.1:36101:36101)] 2024-11-10T13:01:17,132 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:17,132 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:01:17,132 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:01:17,133 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T13:01:17,133 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:01:17,133 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:17,133 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:01:17,133 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:01:17,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:01:17,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:01:17,135 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:17,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:01:17,136 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:01:17,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:17,137 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:01:17,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:01:17,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:17,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:01:17,139 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:01:17,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:17,139 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:01:17,140 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740 2024-11-10T13:01:17,141 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740 2024-11-10T13:01:17,142 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:01:17,142 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:01:17,142 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-10T13:01:17,144 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:01:17,144 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856328, jitterRate=0.0888778567314148}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:01:17,145 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:01:17,145 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243677133Writing region info on filesystem at 1731243677133Initializing all the Stores at 1731243677134 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243677134Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243677134Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243677134Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243677134Cleaning up temporary data from old regions at 1731243677142 (+8 ms)Running coprocessor post-open hooks at 1731243677145 (+3 ms)Region opened successfully at 1731243677145 2024-11-10T13:01:17,146 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243677112 2024-11-10T13:01:17,149 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:01:17,149 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:01:17,149 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:17,150 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,41129,1731243676346, state=OPEN 2024-11-10T13:01:17,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:01:17,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:01:17,154 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:17,154 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:17,154 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:17,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:01:17,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,41129,1731243676346 in 195 msec 2024-11-10T13:01:17,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:01:17,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-10T13:01:17,160 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:17,160 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:01:17,162 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:01:17,162 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,41129,1731243676346, seqNum=-1] 2024-11-10T13:01:17,162 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:01:17,163 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41395, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:01:17,168 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 666 msec 2024-11-10T13:01:17,168 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243677168, completionTime=-1 2024-11-10T13:01:17,168 INFO 
[master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:01:17,168 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:01:17,170 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:01:17,170 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243737170 2024-11-10T13:01:17,170 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243797170 2024-11-10T13:01:17,170 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:37415, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,171 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,172 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:01:17,174 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:01:17,176 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:01:17,177 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:01:17,177 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37415,1731243676301-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:17,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c83f30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:17,263 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,37415,-1 for getting cluster id 2024-11-10T13:01:17,263 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:01:17,265 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f0bd1452-dd8d-436d-adf7-6b85f833c50a' 2024-11-10T13:01:17,265 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:01:17,265 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f0bd1452-dd8d-436d-adf7-6b85f833c50a" 2024-11-10T13:01:17,266 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39f3cfa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:17,266 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,37415,-1] 2024-11-10T13:01:17,266 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:01:17,266 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:17,267 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44810, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:01:17,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c10cfb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:17,268 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:01:17,269 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,41129,1731243676346, seqNum=-1] 2024-11-10T13:01:17,270 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:01:17,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:01:17,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,37415,1731243676301 2024-11-10T13:01:17,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:17,276 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:01:17,276 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-10T13:01:17,276 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-10T13:01:17,277 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:01:17,277 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3857ccc89b65,37415,1731243676301 2024-11-10T13:01:17,277 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@48ebaf46 2024-11-10T13:01:17,278 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:01:17,281 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44820, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:01:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T13:01:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
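The two TableDescriptorChecker warnings above fire because the test submits a descriptor with MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below normal defaults, in order to force frequent flushes and WAL rolls. The test's own source is not part of this log; the following is a hedged sketch of how a descriptor with those values could be built using the standard HBase client API (connection configuration is assumed to come from hbase-site.xml on the classpath).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch only: builds a descriptor with the small sizes reported by
// TableDescriptorChecker above; the real test presumably does something similar.
public class CreateTinyTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setMaxFileSize(786432L)      // value behind the MAX_FILESIZE warning
          .setMemStoreFlushSize(8192L)  // value behind the MEMSTORE_FLUSHSIZE warning
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      admin.createTable(td);
    }
  }
}
```

Such undersized limits are deliberate in a rolling test: every few KB of writes pushes the memstore over its flush threshold, which in turn exercises the WAL roll path under test.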
2024-11-10T13:01:17,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:01:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-10T13:01:17,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:01:17,285 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-10T13:01:17,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:01:17,286 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:01:17,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741835_1011 (size=395) 2024-11-10T13:01:17,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741835_1011 (size=395) 2024-11-10T13:01:17,295 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4f147ccc4ef2ed9d79254f24aeb55dc7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc 2024-11-10T13:01:17,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37643 is added to blk_1073741836_1012 (size=78) 2024-11-10T13:01:17,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44651 is added to blk_1073741836_1012 (size=78) 2024-11-10T13:01:17,301 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:17,301 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 4f147ccc4ef2ed9d79254f24aeb55dc7, disabling compactions & flushes 2024-11-10T13:01:17,301 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,301 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,301 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. after waiting 0 ms 2024-11-10T13:01:17,301 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,301 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,301 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4f147ccc4ef2ed9d79254f24aeb55dc7: Waiting for close lock at 1731243677301Disabling compacts and flushes for region at 1731243677301Disabling writes for close at 1731243677301Writing region close event to WAL at 1731243677301Closed at 1731243677301 2024-11-10T13:01:17,302 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:01:17,303 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731243677302"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243677302"}]},"ts":"1731243677302"} 2024-11-10T13:01:17,305 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
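The CREATE_TABLE_ADD_TO_META step above writes the new region's info:regioninfo and info:state cells into hbase:meta. A small client-side sketch of reading that state cell back (illustration only: table and family names are taken from the log, the probe is limited to the first matching row, and connection configuration is assumed to come from the classpath hbase-site.xml):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: scan hbase:meta starting at the test table's row prefix and print the
// info:state cell that the create-table procedure just wrote.
public class MetaStatePeek {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("TestLogRolling-testLogRollOnPipelineRestart,"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"))
          .setLimit(1);
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println(Bytes.toString(r.getRow()) + " state="
              + Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
        }
      }
    }
  }
}
```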
2024-11-10T13:01:17,306 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:01:17,307 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243677306"}]},"ts":"1731243677306"} 2024-11-10T13:01:17,309 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-10T13:01:17,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4f147ccc4ef2ed9d79254f24aeb55dc7, ASSIGN}] 2024-11-10T13:01:17,310 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4f147ccc4ef2ed9d79254f24aeb55dc7, ASSIGN 2024-11-10T13:01:17,311 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4f147ccc4ef2ed9d79254f24aeb55dc7, ASSIGN; state=OFFLINE, location=3857ccc89b65,41129,1731243676346; forceNewPlan=false, retain=false 2024-11-10T13:01:17,462 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f147ccc4ef2ed9d79254f24aeb55dc7, regionState=OPENING, regionLocation=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:17,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4f147ccc4ef2ed9d79254f24aeb55dc7, ASSIGN because future has completed 2024-11-10T13:01:17,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f147ccc4ef2ed9d79254f24aeb55dc7, server=3857ccc89b65,41129,1731243676346}] 2024-11-10T13:01:17,622 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 
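Once the ASSIGN procedure above dispatches OpenRegionProcedure to 3857ccc89b65,41129, a client can confirm the placement through the region locator. A hedged sketch follows; the row key "row1" is an arbitrary probe value for the single-region table, not something taken from this log.

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: ask the client-side locator which server hosts the region of the test
// table after the assignment above completes. Illustration only.
public class WhereIsMyRegion {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // reload=true bypasses the client's location cache for a fresh answer
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row1"), true);
      System.out.println(loc.getRegion().getEncodedName() + " is on " + loc.getServerName());
    }
  }
}
```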
2024-11-10T13:01:17,622 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f147ccc4ef2ed9d79254f24aeb55dc7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:17,622 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,622 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:17,622 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,622 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,624 INFO [StoreOpener-4f147ccc4ef2ed9d79254f24aeb55dc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,625 INFO [StoreOpener-4f147ccc4ef2ed9d79254f24aeb55dc7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f147ccc4ef2ed9d79254f24aeb55dc7 columnFamilyName info 2024-11-10T13:01:17,625 DEBUG [StoreOpener-4f147ccc4ef2ed9d79254f24aeb55dc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:17,625 INFO [StoreOpener-4f147ccc4ef2ed9d79254f24aeb55dc7-1 {}] regionserver.HStore(327): Store=4f147ccc4ef2ed9d79254f24aeb55dc7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:17,626 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,626 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,626 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,627 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,627 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,628 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,630 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:17,631 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4f147ccc4ef2ed9d79254f24aeb55dc7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748136, jitterRate=-0.04869595170021057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:01:17,631 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:17,631 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4f147ccc4ef2ed9d79254f24aeb55dc7: Running coprocessor pre-open hook at 1731243677623Writing region info on filesystem at 1731243677623Initializing all the Stores at 1731243677623Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243677623Cleaning up temporary data from old regions at 1731243677627 (+4 ms)Running coprocessor post-open hooks at 1731243677631 (+4 ms)Region opened successfully at 1731243677631 2024-11-10T13:01:17,632 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7., pid=6, masterSystemTime=1731243677618 2024-11-10T13:01:17,635 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,635 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:17,636 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4f147ccc4ef2ed9d79254f24aeb55dc7, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,41129,1731243676346 2024-11-10T13:01:17,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f147ccc4ef2ed9d79254f24aeb55dc7, server=3857ccc89b65,41129,1731243676346 because future has completed 2024-11-10T13:01:17,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:01:17,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4f147ccc4ef2ed9d79254f24aeb55dc7, server=3857ccc89b65,41129,1731243676346 in 174 msec 2024-11-10T13:01:17,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:01:17,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4f147ccc4ef2ed9d79254f24aeb55dc7, ASSIGN in 332 msec 2024-11-10T13:01:17,645 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:01:17,645 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243677645"}]},"ts":"1731243677645"} 2024-11-10T13:01:17,647 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-10T13:01:17,648 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:01:17,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-10T13:01:17,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:17,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:18,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:18,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:19,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:19,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:20,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:20,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:21,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:21,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:22,635 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:01:22,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:01:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:01:22,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:01:22,662 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-10T13:01:22,662 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-10T13:01:22,663 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-10T13:01:22,663 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-10T13:01:22,663 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-10T13:01:22,663 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-10T13:01:22,663 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-10T13:01:22,664 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-10T13:01:22,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T13:01:22,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
java.lang.reflect.InvocationTargetException: null (identical stack trace: reflective isFileClosed, caused by java.io.IOException: Filesystem closed)
The same pair of lease-recovery warnings, each with the identical stack trace, is then logged roughly once per second for both WAL files at 13:01:23,956 / 23,964, 13:01:24,956 / 24,964, 13:01:25,957 / 25,965, 13:01:26,958 / 26,965, 13:01:27,958 / 27,966 and 13:01:28,959 / 28,966.
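These warnings come from the WAL close path polling whether HDFS has closed the old writer's file: RecoverLeaseFSUtils looks up DistributedFileSystem.isFileClosed reflectively and retries about once per second, and because the filesystem behind these WALs has already been closed, every probe throws java.io.IOException: Filesystem closed, which surfaces as the wrapped InvocationTargetException in the trace. A minimal sketch of that polling pattern is below; the class and method names are illustrative, not the actual HBase implementation.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the reflective "is the file closed yet?" poll that
// RecoverLeaseFSUtils-style code performs. Names here are made up.
public final class LeaseProbeSketch {

  /** Returns true if HDFS reports the file as closed, false if the probe
   *  could not be completed (for example, the DFSClient is already closed). */
  static boolean probeFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed is looked up reflectively because it only exists on
      // DistributedFileSystem, not on the generic FileSystem API.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The target method threw, e.g. java.io.IOException: Filesystem closed.
      // Real code logs this as the "Failed invocation for ..." WARN above.
      return false;
    } catch (ReflectiveOperationException e) {
      // Method missing or inaccessible on this FileSystem implementation.
      return false;
    }
  }

  /** Poll roughly once per second until the file closes or we give up. */
  static boolean waitUntilClosed(FileSystem fs, Path path, int maxAttempts)
      throws InterruptedException {
    for (int i = 0; i < maxAttempts; i++) {
      if (probeFileClosed(fs, path)) {
        return true;
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}
```

With the filesystem already closed the probe can never succeed, so the loop keeps emitting the identical WARN once per second until it gives up, which is exactly the pattern recorded above.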
2024-11-10T13:01:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37415 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T13:01:27,293 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-10T13:01:27,293 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart,, for max=2147483647 with caching=100
2024-11-10T13:01:27,296 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-10T13:01:27,296 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.
2024-11-10T13:01:27,300 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7., hostname=3857ccc89b65,41129,1731243676346, seqNum=2]
2024-11-10T13:01:29,302 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752
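The INFO above is the test recording which WAL file is current just before the datanodes backing the write pipeline are bounced; the burst of warnings that follows at 13:01:29,303 is the DFSClient and the datanodes reacting to those pipelines being torn down. The outline below is a hedged sketch of what a "roll on pipeline restart" style check typically does, assuming a MiniDFSCluster handle and a WAL reference; it is not the actual TestLogRolling source.

```java
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical outline of a "log roll on pipeline restart" style check.
// Assumes a running MiniDFSCluster and a WAL obtained from the region server
// under test; this is a sketch, not the real test code.
final class PipelineRestartSketch {

  static void rollAfterPipelineRestart(MiniDFSCluster dfsCluster, WAL log) throws Exception {
    // Bounce every datanode so the open WAL block pipelines are broken; this
    // is what produces the EOFException / ClosedChannelException noise below.
    dfsCluster.restartDataNodes();
    dfsCluster.waitActive();

    // Force a roll. With the old pipeline gone, the roll must produce a
    // brand-new file; a real test would remember the current file name before
    // the restart and assert afterwards that it changed.
    log.rollWriter(true);
  }
}
```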
2024-11-10T13:01:29,303 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T13:01:29,303 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server (same stack trace)
2024-11-10T13:01:29,303 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server (same stack trace)
2024-11-10T13:01:29,303 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK], DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]) is bad.
2024-11-10T13:01:29,303 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK], DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]) is bad.
2024-11-10T13:01:29,304 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK], DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44651,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]) is bad.
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_764420514_22 at /127.0.0.1:38974 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38974 dst: /127.0.0.1:44651
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:38996 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38996 dst: /127.0.0.1:44651
java.nio.channels.ClosedChannelException: null (same stack trace)
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:45890 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45890 dst: /127.0.0.1:37643
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_764420514_22 at /127.0.0.1:45858 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45858 dst: /127.0.0.1:37643
java.io.IOException: Premature EOF from inputStream (same stack trace)
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:38984 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38984 dst: /127.0.0.1:44651
java.nio.channels.ClosedChannelException: null (same stack trace)
2024-11-10T13:01:29,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:45876 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45876 dst: /127.0.0.1:37643
java.io.IOException: Premature EOF from inputStream (same stack trace)
2024-11-10T13:01:29,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fab6db5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T13:01:29,309 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-10T13:01:29,310 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-10T13:01:29,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-10T13:01:29,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED}
2024-11-10T13:01:29,312 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-10T13:01:29,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
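The "datanode 0 ... is bad" recovery entries above show the client dropping the first node of a two-datanode pipeline. With only two datanodes there is no spare node to recruit as a replacement, which is why mini-cluster tests of this kind usually relax HDFS's replace-datanode-on-failure policy when the cluster is built. The sketch below shows that configuration using the standard HDFS client keys; it is an assumption about how such a cluster is typically set up, not taken from this test's source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Sketch: a two-datanode mini cluster like the one in this log. With only two
// nodes a broken pipeline cannot recruit a replacement datanode, so the
// replace-datanode-on-failure policy is relaxed. Double-check the key names
// against your Hadoop version.
final class TwoNodeClusterSketch {
  static MiniDFSCluster start() throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", false);
    return new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  }
}
```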
2024-11-10T13:01:29,312 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid 50db00b8-c5f1-42c8-ba90-2937de5d7276) service to localhost/127.0.0.1:41197
2024-11-10T13:01:29,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-10T13:01:29,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data3/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T13:01:29,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data4/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-10T13:01:29,313 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-10T13:01:29,322 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T13:01:29,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T13:01:29,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T13:01:29,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T13:01:29,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T13:01:29,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE}
2024-11-10T13:01:29,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T13:01:29,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ad0086e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-46049-hadoop-hdfs-3_4_1-tests_jar-_-any-6014574615755463118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T13:01:29,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:46049}
2024-11-10T13:01:29,442 INFO [Time-limited test {}] server.Server(415): Started @162413ms
2024-11-10T13:01:29,443 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T13:01:29,461 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1014
java.io.EOFException: Unexpected EOF while trying to read response from server (same stack trace)
2024-11-10T13:01:29,461 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1015
java.io.EOFException: Unexpected EOF while trying to read response from server (same stack trace)
2024-11-10T13:01:29,461 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1013
java.io.EOFException: Unexpected EOF while trying to read response from server (same stack trace)
2024-11-10T13:01:29,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:54086 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54086 dst: /127.0.0.1:37643
java.nio.channels.ClosedChannelException: null (same stack trace)
2024-11-10T13:01:29,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:54088 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54088 dst: /127.0.0.1:37643
java.nio.channels.ClosedChannelException: null (same stack trace)
2024-11-10T13:01:29,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_764420514_22 at /127.0.0.1:54078 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54078 dst: /127.0.0.1:37643
java.nio.channels.ClosedChannelException: null (same stack trace)
2024-11-10T13:01:29,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60017892{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:29,465 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:29,465 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:29,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:29,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:29,467 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:29,467 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid bb9fc405-77e9-4a4e-ade7-ab4468b8eae9) service to localhost/127.0.0.1:41197 2024-11-10T13:01:29,467 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:01:29,467 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:29,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data1/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:29,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data2/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:29,467 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:29,481 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:29,485 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:29,486 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:29,486 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:29,486 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:01:29,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:29,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:29,537 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:01:29,539 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40cf659fa35798e2 with lease ID 0x6bb04ef2f9fe43e9: from storage DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859 node DatanodeRegistration(127.0.0.1:38617, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=34333, infoSecurePort=0, ipcPort=35005, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:29,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40cf659fa35798e2 with lease ID 0x6bb04ef2f9fe43e9: from storage DS-1bf6b48c-5ee1-4e73-ab5d-436a592ebb52 node DatanodeRegistration(127.0.0.1:38617, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=34333, infoSecurePort=0, ipcPort=35005, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:01:29,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22dbe5ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-37505-hadoop-hdfs-3_4_1-tests_jar-_-any-3525702570717536983/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:29,609 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:37505} 2024-11-10T13:01:29,609 INFO [Time-limited test {}] server.Server(415): Started @162580ms 2024-11-10T13:01:29,610 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-10T13:01:29,697 WARN [Thread-1371 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:01:29,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a5678250f66b8c9 with lease ID 0x6bb04ef2f9fe43ea: from storage DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1 node DatanodeRegistration(127.0.0.1:34777, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=40627, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:29,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a5678250f66b8c9 with lease ID 0x6bb04ef2f9fe43ea: from storage DS-b026d2bf-270d-4cb4-b3f8-f8c6d717bec3 node DatanodeRegistration(127.0.0.1:34777, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=40627, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:29,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:29,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:30,629 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-10T13:01:30,631 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-10T13:01:30,632 ERROR [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:01:30,633 WARN [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:30,633 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C41129%2C1731243676346:(num 1731243676752) roll requested 2024-11-10T13:01:30,633 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.1731243690633 2024-11-10T13:01:30,641 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 newFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 2024-11-10T13:01:30,641 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:30,641 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:30,642 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:30,642 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:30,642 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:30,642 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 2024-11-10T13:01:30,642 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:01:30,642 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:30,642 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 2024-11-10T13:01:30,643 WARN [IPC Server handler 0 on default port 41197 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-10T13:01:30,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 after 1ms 2024-11-10T13:01:30,650 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40627:40627),(127.0.0.1/127.0.0.1:34333:34333)] 2024-11-10T13:01:30,650 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 is not closed yet, will try archiving it next time 2024-11-10T13:01:30,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34777 is added to blk_1073741833_1017 (size=1632) 2024-11-10T13:01:30,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:30,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:31,539 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-10T13:01:31,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:31,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:32,654 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-10T13:01:32,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:32,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:33,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:33,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:34,644 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 after 4002ms 2024-11-10T13:01:34,664 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:34,665 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34777,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK], DatanodeInfoWithStorage[127.0.0.1:38617,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34777,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]) is bad. 2024-11-10T13:01:34,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:48226 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48226 dst: /127.0.0.1:34777 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:01:34,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:39048 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39048 dst: /127.0.0.1:38617 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:34,705 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid bb9fc405-77e9-4a4e-ade7-ab4468b8eae9) service to localhost/127.0.0.1:41197 2024-11-10T13:01:34,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data1/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:34,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data2/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:34,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22dbe5ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:34,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:34,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:34,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:34,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:34,712 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:34,727 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:34,730 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:34,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:34,731 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:34,731 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:34,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:34,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:34,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78dcc37b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-39417-hadoop-hdfs-3_4_1-tests_jar-_-any-8857178921003808179/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:34,857 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:39417} 2024-11-10T13:01:34,857 INFO [Time-limited test {}] server.Server(415): Started @167828ms 2024-11-10T13:01:34,858 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:01:34,881 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:34,882 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-602421861_22 at /127.0.0.1:51428 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38617:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51428 dst: /127.0.0.1:38617 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:01:34,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ad0086e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:34,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:34,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:34,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:34,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:34,888 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:34,888 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:01:34,888 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid 50db00b8-c5f1-42c8-ba90-2937de5d7276) service to localhost/127.0.0.1:41197 2024-11-10T13:01:34,888 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:34,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data3/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:34,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data4/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:34,890 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:34,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:34,918 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:34,921 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:34,921 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:34,921 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:34,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:34,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:34,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:34,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:35,027 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1
2024-11-10T13:01:35,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cd1484c1d747f2e with lease ID 0x6bb04ef2f9fe43eb: from storage DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1 node DatanodeRegistration(127.0.0.1:36395, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36861, infoSecurePort=0, ipcPort=45309, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 7, hasStaleStorage: false, processing time: 7 msecs, invalidatedBlocks: 0
2024-11-10T13:01:35,038 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cd1484c1d747f2e with lease ID 0x6bb04ef2f9fe43eb: from storage DS-b026d2bf-270d-4cb4-b3f8-f8c6d717bec3 node DatanodeRegistration(127.0.0.1:36395, datanodeUuid=bb9fc405-77e9-4a4e-ade7-ab4468b8eae9, infoPort=36861, infoSecurePort=0, ipcPort=45309, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-10T13:01:35,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@468f57bd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/java.io.tmpdir/jetty-localhost-33271-hadoop-hdfs-3_4_1-tests_jar-_-any-13248552348837790540/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T13:01:35,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:33271}
2024-11-10T13:01:35,075 INFO [Time-limited test {}] server.Server(415): Started @168045ms
2024-11-10T13:01:35,077 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T13:01:35,191 WARN [Thread-1445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-10T13:01:35,194 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbe309f11c7ae8b2 with lease ID 0x6bb04ef2f9fe43ec: from storage DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859 node DatanodeRegistration(127.0.0.1:45455, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=40709, infoSecurePort=0, ipcPort=42177, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:35,194 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdbe309f11c7ae8b2 with lease ID 0x6bb04ef2f9fe43ec: from storage DS-1bf6b48c-5ee1-4e73-ab5d-436a592ebb52 node DatanodeRegistration(127.0.0.1:45455, datanodeUuid=50db00b8-c5f1-42c8-ba90-2937de5d7276, infoPort=40709, infoSecurePort=0, ipcPort=42177, storageInfo=lv=-57;cid=testClusterID;nsid=833709798;c=1731243675520), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:35,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:35,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:36,107 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-10T13:01:36,110 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-10T13:01:36,111 ERROR [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38617,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
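Note (added for clarity): the repeated util.RecoverLeaseFSUtils(258) "Failed invocation" warnings above come from HBase polling the NameNode after requesting lease recovery on a WAL file; the inner "Filesystem closed" cause simply means the backing DFSClient had already been shut down. The sketch below is a minimal, hypothetical illustration of that recover-then-poll pattern using only public HDFS client APIs (DistributedFileSystem.recoverLease and isFileClosed); it is not HBase's RecoverLeaseFSUtils implementation, and the path, timeout, and sleep interval are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease, then poll until the file is closed or we time out.
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = dfs.recoverLease(wal); // true means the file is already closed
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);                     // back off between attempts
      if (dfs.isFileClosed(wal)) {             // the call failing with "Filesystem closed" above
        return true;
      }
      recovered = dfs.recoverLease(wal);       // re-issue recovery, like the attempt=N retries in the log
    }
    return recovered;
  }

  public static void main(String[] args) throws Exception {
    // Placeholder WAL path; substitute a real hdfs:// URI.
    Path wal = new Path("hdfs://localhost:8020/example/wals/example.wal");
    try (FileSystem fs = FileSystem.get(wal.toUri(), new Configuration())) {
      if (fs instanceof DistributedFileSystem) {
        System.out.println("closed=" + recoverLease((DistributedFileSystem) fs, wal, 60_000L));
      }
    }
  }
}
```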
2024-11-10T13:01:36,111 WARN [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38617,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-10T13:01:36,111 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C41129%2C1731243676346:(num 1731243690633) roll requested
2024-11-10T13:01:36,112 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:36,117 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 newFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:36,117 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:01:36,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:01:36,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:01:36,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:01:36,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:01:36,118 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:36,118 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38617,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
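Note (added for clarity): the sequence above (roll requested, new writer created, sync runners interrupted, old WAL rolled) is driven by the region server's log roller after the append failure. A roll can also be requested explicitly through the public client API. The following is an illustrative sketch only, assuming an HBase 2.x/3.x client where Admin.getRegionServers() and Admin.rollWALWriter(ServerName) are available; the ZooKeeper quorum address is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to roll its WAL writer.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
```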
2024-11-10T13:01:36,118 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38617,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:36,119 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 2024-11-10T13:01:36,119 WARN [IPC Server handler 0 on default port 41197 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-10T13:01:36,119 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 after 0ms 2024-11-10T13:01:36,120 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36861:36861),(127.0.0.1/127.0.0.1:40709:40709)] 2024-11-10T13:01:36,120 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 is not closed yet, will try archiving it next time 2024-11-10T13:01:36,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:36,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:37,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:37,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:38,121 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:38,127 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111 newFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:38,127 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:38,127 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:38,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:38,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:38,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:38,128 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:38,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741838_1019 (size=1264) 2024-11-10T13:01:38,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741838_1019 (size=1264) 2024-11-10T13:01:38,130 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 is not closed yet, will try archiving it next time 2024-11-10T13:01:38,132 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40709:40709),(127.0.0.1/127.0.0.1:36861:36861)] 2024-11-10T13:01:38,132 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 is not closed yet, will try archiving it next time
2024-11-10T13:01:38,133 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752
2024-11-10T13:01:38,133 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752
2024-11-10T13:01:38,133 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752 after 0ms
2024-11-10T13:01:38,133 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752
2024-11-10T13:01:38,146 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731243677632/Put/vlen=218/seqid=0]
2024-11-10T13:01:38,146 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731243687301/Put/vlen=1045/seqid=0]
2024-11-10T13:01:38,146 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243676752
2024-11-10T13:01:38,146 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633
2024-11-10T13:01:38,146 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633
2024-11-10T13:01:38,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 after 1ms
2024-11-10T13:01:38,147 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633
2024-11-10T13:01:38,151 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731243690632/Put/vlen=1045/seqid=0]
2024-11-10T13:01:38,152 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731243692655/Put/vlen=1045/seqid=0]
2024-11-10T13:01:38,152 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633
2024-11-10T13:01:38,152 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:38,152 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:38,152 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111 after 0ms
2024-11-10T13:01:38,152 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243696111
2024-11-10T13:01:38,155 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731243696111/Put/vlen=1045/seqid=0]
2024-11-10T13:01:38,155 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121
2024-11-10T13:01:38,156 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121
2024-11-10T13:01:38,156 WARN [IPC Server handler 3 on default port 41197 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-11-10T13:01:38,156 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 after 0ms
2024-11-10T13:01:38,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:38,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:39,034 WARN [ResponseProcessor for block BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:39,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_764420514_22 at /127.0.0.1:43850 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43850 dst: /127.0.0.1:45455 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45455 remote=/127.0.0.1:43850]. Total timeout mills is 60000, 59092 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:39,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_764420514_22 at /127.0.0.1:57868 [Receiving block BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57868 dst: /127.0.0.1:36395 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:01:39,034 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 block BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45455,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK], DatanodeInfoWithStorage[127.0.0.1:36395,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45455,DS-79bf7baf-fb4e-439e-a27f-e6d2ca05f859,DISK]) is bad. 
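Note (added for clarity): the "Error Recovery ... datanode 0 ... is bad" message above is the HDFS client dropping the failed datanode and rebuilding the write pipeline. In small test clusters with only two or three datanodes this behavior is commonly tuned through the standard client-side replace-datanode-on-failure keys. The snippet below is a hedged example of those configuration keys; the chosen values are illustrative for a mini cluster, not a recommendation.

```java
import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoveryConf {
  public static Configuration create() {
    Configuration conf = new Configuration();
    // Leave the replace-datanode-on-failure feature enabled...
    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
    // ...but with policy NEVER the client keeps writing to the remaining pipeline
    // instead of trying to add a replacement datanode (typical for 2-3 node test clusters).
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // Fail faster than the 60s socket timeout visible in the DataXceiver trace above.
    conf.setInt("dfs.client.socket-timeout", 10000);
    return conf;
  }
}
```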
2024-11-10T13:01:39,035 WARN [DataStreamer for file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 block BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
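Note (added for clarity): the DataStreamer exception above is an org.apache.hadoop.ipc.RemoteException carrying the NameNode-side "Unexpected BlockUCState" error, raised because the test's lease recovery on blk_1073741839_1021 had already put the block under recovery when the writer tried to bump its generation stamp. A hedged sketch of how client code can inspect such a failure with the public RemoteException API (getClassName, unwrapRemoteException); the handler itself is hypothetical.

```java
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

final class RemoteErrorSketch {
  static void handle(IOException e) {
    if (e instanceof RemoteException) {
      RemoteException re = (RemoteException) e;
      // Reports the server-side exception class, e.g. the IOException thrown by
      // FSNamesystem.checkUCBlock in the trace above.
      System.err.println("remote error class: " + re.getClassName());
      IOException unwrapped = re.unwrapRemoteException(); // convert to the local type when known
      System.err.println("unwrapped: " + unwrapped);
    } else {
      System.err.println("local error: " + e);
    }
  }
}
```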
2024-11-10T13:01:39,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741839_1022 (size=85) 2024-11-10T13:01:39,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:39,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:40,030 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
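(Illustrative aside, not test output.) The repeated "Failed invocation ... java.io.IOException: Filesystem closed" warnings from RecoverLeaseFSUtils above come from a recovery loop that polls the NameNode roughly once a second until the old WAL file is reported closed; here the poll can never succeed because the DFSClient behind the FileSystem has already been shut down. A minimal sketch of that polling pattern, using only the DistributedFileSystem calls visible in the traces (recoverLease, isFileClosed); the class name, loop shape, and timeout are assumptions:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the lease-recovery polling seen in the warnings above; not the
// actual RecoverLeaseFSUtils implementation.
public final class LeaseRecoveryPollSketch {
  public static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery; true means the file is already closed.
    if (dfs.recoverLease(wal)) {
      return true;
    }
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // roughly the 1s retry cadence visible in the log
      // Throws IOException("Filesystem closed") if the DFSClient was shut down,
      // which is exactly the failure the warnings above keep reporting.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
    }
    return false;
  }
}
```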
2024-11-10T13:01:40,120 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243690633 after 4001ms 2024-11-10T13:01:40,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:40,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:41,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:41,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:42,157 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 after 4001ms 2024-11-10T13:01:42,157 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:42,161 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:42,161 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-10T13:01:42,162 ERROR [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:42,162 WARN [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-10T13:01:42,162 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C41129%2C1731243676346.meta:.meta(num 1731243677120) roll requested 2024-11-10T13:01:42,162 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.meta.1731243702162.meta 2024-11-10T13:01:42,167 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,167 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,167 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,168 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,168 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,168 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243702162.meta 2024-11-10T13:01:42,168 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:42,168 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
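(Illustrative aside, not test output.) The roll at 13:01:42,162-168 above follows the usual recovery path for a failed append: a writer is opened on a fresh pipeline first, new appends are redirected to it, and only then is the dead writer closed, with the close failure tolerated ("close old writer failed") and the old file handed off to lease recovery. A rough sketch of that ordering against the plain FileSystem API, not HBase's actual FSHLog/AbstractFSWAL code; rollWriter and recoverLeaseLater are hypothetical names:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the roll-on-append-failure ordering seen above.
final class WalRollSketch {
  private FSDataOutputStream current;

  synchronized FSDataOutputStream rollWriter(FileSystem fs, Path newWal, Path oldWal)
      throws IOException {
    FSDataOutputStream newWriter = fs.create(newWal, false); // fresh block pipeline
    FSDataOutputStream oldWriter = current;
    current = newWriter; // subsequent appends go to the new WAL
    try {
      if (oldWriter != null) {
        oldWriter.close(); // may fail if every datanode in the old pipeline is bad
      }
    } catch (IOException e) {
      // Tolerated, as in "close old writer failed" above: the old file is left
      // to lease recovery so the NameNode can finalize its length later.
      recoverLeaseLater(fs, oldWal);
    }
    return newWriter;
  }

  private void recoverLeaseLater(FileSystem fs, Path oldWal) {
    // In the log this work is queued to a Close-WAL-Writer thread that uses
    // RecoverLeaseFSUtils; omitted in this sketch.
  }
}
```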
2024-11-10T13:01:42,168 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta 2024-11-10T13:01:42,169 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40709:40709),(127.0.0.1/127.0.0.1:36861:36861)] 2024-11-10T13:01:42,169 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta is not closed yet, will try archiving it next time 2024-11-10T13:01:42,169 WARN [IPC Server handler 1 on default port 41197 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-10T13:01:42,169 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta after 1ms 2024-11-10T13:01:42,184 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/info/39f888089c814ea289c5f03ea98a1074 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7./info:regioninfo/1731243677636/Put/seqid=0 2024-11-10T13:01:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741841_1025 (size=7125) 2024-11-10T13:01:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741841_1025 (size=7125) 2024-11-10T13:01:42,193 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/info/39f888089c814ea289c5f03ea98a1074 2024-11-10T13:01:42,213 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/ns/1e7a4ab3c2514737885d852bb3685dd4 is 43, key is default/ns:d/1731243677164/Put/seqid=0 2024-11-10T13:01:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741842_1026 (size=5153) 2024-11-10T13:01:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741842_1026 (size=5153) 2024-11-10T13:01:42,218 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/ns/1e7a4ab3c2514737885d852bb3685dd4 2024-11-10T13:01:42,237 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/table/16f08d58773b413683f59a74dde9d147 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731243677645/Put/seqid=0 2024-11-10T13:01:42,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741843_1027 (size=5438) 2024-11-10T13:01:42,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741843_1027 (size=5438) 2024-11-10T13:01:42,242 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/table/16f08d58773b413683f59a74dde9d147 2024-11-10T13:01:42,247 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/info/39f888089c814ea289c5f03ea98a1074 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/info/39f888089c814ea289c5f03ea98a1074 2024-11-10T13:01:42,252 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/info/39f888089c814ea289c5f03ea98a1074, entries=10, sequenceid=11, filesize=7.0 K 2024-11-10T13:01:42,253 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/ns/1e7a4ab3c2514737885d852bb3685dd4 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/ns/1e7a4ab3c2514737885d852bb3685dd4 2024-11-10T13:01:42,257 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/ns/1e7a4ab3c2514737885d852bb3685dd4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:01:42,258 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/.tmp/table/16f08d58773b413683f59a74dde9d147 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/table/16f08d58773b413683f59a74dde9d147 2024-11-10T13:01:42,263 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/table/16f08d58773b413683f59a74dde9d147, entries=2, sequenceid=11, filesize=5.3 K 2024-11-10T13:01:42,264 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 103ms, sequenceid=11, compaction requested=false 2024-11-10T13:01:42,264 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T13:01:42,264 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4f147ccc4ef2ed9d79254f24aeb55dc7 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-10T13:01:42,265 ERROR [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:42,265 WARN [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc-prefix:3857ccc89b65,41129,1731243676346 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
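(Illustrative aside, not test output.) The flush entries before and after this point (13:01:42,184-263 and 13:01:42,297-316) show the flush-commit pattern: each store file is first written under the region's .tmp directory and then "Committing ... as ..." moves it into the column family directory, so readers never observe a partially written HFile. A minimal sketch of that commit step; FileSystem.rename is a real API, while the class and method names are assumptions:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of committing a flushed store file from .tmp into its family dir.
final class TmpCommitSketch {
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
    Path dest = new Path(familyDir, tmpFile.getName());
    // Rename is atomic within a single HDFS namespace, so readers see either
    // no file or the complete file, never a partial one.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }
}
```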
2024-11-10T13:01:42,265 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C41129%2C1731243676346:(num 1731243698121) roll requested 2024-11-10T13:01:42,266 INFO [regionserver/3857ccc89b65:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C41129%2C1731243676346.1731243702266 2024-11-10T13:01:42,270 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 newFile=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243702266 2024-11-10T13:01:42,270 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,270 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,271 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,271 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,271 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,271 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243702266 2024-11-10T13:01:42,271 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:42,271 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1340826382-172.17.0.2-1731243675520:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:42,272 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:42,272 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 after 0ms 2024-11-10T13:01:42,276 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.1731243698121 to hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs/3857ccc89b65%2C41129%2C1731243676346.1731243698121 2024-11-10T13:01:42,276 DEBUG [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36861:36861),(127.0.0.1/127.0.0.1:40709:40709)] 2024-11-10T13:01:42,297 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/.tmp/info/d107549994c0434a84c55d74a9c24194 is 1080, key is row1002/info:/1731243687301/Put/seqid=0 2024-11-10T13:01:42,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741845_1029 (size=9270) 2024-11-10T13:01:42,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741845_1029 (size=9270) 2024-11-10T13:01:42,303 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/.tmp/info/d107549994c0434a84c55d74a9c24194 2024-11-10T13:01:42,309 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/.tmp/info/d107549994c0434a84c55d74a9c24194 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/info/d107549994c0434a84c55d74a9c24194 2024-11-10T13:01:42,315 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/info/d107549994c0434a84c55d74a9c24194, entries=4, sequenceid=8, filesize=9.1 K 2024-11-10T13:01:42,316 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4f147ccc4ef2ed9d79254f24aeb55dc7 in 52ms, sequenceid=8, compaction requested=false 2024-11-10T13:01:42,316 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4f147ccc4ef2ed9d79254f24aeb55dc7: 2024-11-10T13:01:42,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:01:42,322 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:01:42,322 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:42,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:42,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:42,322 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:01:42,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:01:42,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=191438578, stopped=false 2024-11-10T13:01:42,322 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,37415,1731243676301 2024-11-10T13:01:42,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:42,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:42,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:42,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:42,324 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:01:42,324 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
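The call stack above is the normal JUnit teardown path for these tests: an @After method asks the testing utility to shut the mini cluster down, which in turn closes the shared async cluster connection and prints the stack seen here. A minimal sketch of that pattern, assuming the usual shared HBaseTestingUtil field (the class and field names below are illustrative, not the actual AbstractTestLogRolling source):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Shared mini-cluster helper, kept per test class as the HBase tests above do.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the region server(s) and master and closes the async cluster
    // connection, producing the "Connection has been closed by ..." entries
    // and the shutdown sequence that follows in the log.
    TEST_UTIL.shutdownMiniCluster();
  }
}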
2024-11-10T13:01:42,324 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:42,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:42,324 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:42,324 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:42,324 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,41129,1731243676346' ***** 2024-11-10T13:01:42,324 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:01:42,325 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(3091): Received CLOSE for 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,41129,1731243676346 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:41129. 2024-11-10T13:01:42,325 DEBUG [RS:0;3857ccc89b65:41129 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:01:42,325 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4f147ccc4ef2ed9d79254f24aeb55dc7, disabling compactions & flushes 2024-11-10T13:01:42,325 DEBUG [RS:0;3857ccc89b65:41129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:42,325 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:01:42,325 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:42,325 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. after waiting 1 ms 2024-11-10T13:01:42,326 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:42,326 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-10T13:01:42,326 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4f147ccc4ef2ed9d79254f24aeb55dc7=TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7.} 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:01:42,326 DEBUG [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4f147ccc4ef2ed9d79254f24aeb55dc7 2024-11-10T13:01:42,326 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:01:42,326 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:01:42,330 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/default/TestLogRolling-testLogRollOnPipelineRestart/4f147ccc4ef2ed9d79254f24aeb55dc7/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-10T13:01:42,331 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:42,331 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:01:42,331 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4f147ccc4ef2ed9d79254f24aeb55dc7: Waiting for close lock at 1731243702325Running coprocessor pre-close hooks at 1731243702325Disabling compacts and flushes for region at 1731243702325Disabling writes for close at 1731243702326 (+1 ms)Writing region close event to WAL at 1731243702327 (+1 ms)Running coprocessor post-close hooks at 1731243702331 (+4 ms)Closed at 1731243702331 2024-11-10T13:01:42,331 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731243677282.4f147ccc4ef2ed9d79254f24aeb55dc7. 2024-11-10T13:01:42,332 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:01:42,332 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:42,332 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243702326Running coprocessor pre-close hooks at 1731243702326Disabling compacts and flushes for region at 1731243702326Disabling writes for close at 1731243702326Writing region close event to WAL at 1731243702328 (+2 ms)Running coprocessor post-close hooks at 1731243702332 (+4 ms)Closed at 1731243702332 2024-11-10T13:01:42,332 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:42,526 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,41129,1731243676346; all regions closed. 
2024-11-10T13:01:42,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,527 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741840_1023 (size=825) 2024-11-10T13:01:42,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741840_1023 (size=825) 2024-11-10T13:01:42,602 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:42,692 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:01:42,693 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:01:42,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:42,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:43,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:43,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:44,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:44,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:45,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:45,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:46,170 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta after 4002ms 2024-11-10T13:01:46,171 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/WALs/3857ccc89b65,41129,1731243676346/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta to hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs/3857ccc89b65%2C41129%2C1731243676346.meta.1731243677120.meta 2024-11-10T13:01:46,178 DEBUG [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs 2024-11-10T13:01:46,178 INFO [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C41129%2C1731243676346.meta:.meta(num 1731243702162) 2024-11-10T13:01:46,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741844_1028 (size=1162) 2024-11-10T13:01:46,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741844_1028 (size=1162) 2024-11-10T13:01:46,186 DEBUG [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs 2024-11-10T13:01:46,186 INFO [RS:0;3857ccc89b65:41129 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C41129%2C1731243676346:(num 1731243702266) 2024-11-10T13:01:46,186 DEBUG [RS:0;3857ccc89b65:41129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:46,186 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:01:46,186 INFO [RS:0;3857ccc89b65:41129 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:01:46,187 INFO [RS:0;3857ccc89b65:41129 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T13:01:46,187 INFO [RS:0;3857ccc89b65:41129 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:01:46,187 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
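The retry sequence above (repeated "Failed invocation ... Filesystem closed" WARNs followed by "Recovered lease, attempt=1 ... after 4002ms") is the lease-recovery loop in RecoverLeaseFSUtils: it asks the NameNode to recover the WAL file's lease, then periodically probes whether the file is closed; when the probing DFSClient has itself already been shut down, the probe throws "Filesystem closed" and the loop simply retries until its deadline. A simplified sketch of that polling pattern, assuming a DistributedFileSystem handle (the real utility calls isFileClosed via reflection and uses configurable timeouts; the helper below is illustrative only):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Returns true once the file's lease is recovered (file closed), false on timeout.
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      long timeoutMs, long pollMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // Ask the NameNode to start (or re-check) lease recovery; true means the
      // file is already closed and safe to read or archive.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      try {
        // Between attempts, probe whether the file has been closed yet. If the
        // local FileSystem instance was already closed, this throws
        // IOException("Filesystem closed"), matching the WARNs above.
        if (dfs.isFileClosed(walFile)) {
          return true;
        }
      } catch (IOException e) {
        // Swallow and retry until the deadline, as the log's repeated WARNs show.
      }
      Thread.sleep(pollMs);
    }
    return false;
  }
}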
2024-11-10T13:01:46,187 INFO [RS:0;3857ccc89b65:41129 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41129 2024-11-10T13:01:46,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,41129,1731243676346 2024-11-10T13:01:46,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:01:46,189 INFO [RS:0;3857ccc89b65:41129 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:01:46,190 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,41129,1731243676346] 2024-11-10T13:01:46,191 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,41129,1731243676346 already deleted, retry=false 2024-11-10T13:01:46,191 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,41129,1731243676346 expired; onlineServers=0 2024-11-10T13:01:46,191 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,37415,1731243676301' ***** 2024-11-10T13:01:46,191 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:01:46,192 DEBUG [M:0;3857ccc89b65:37415 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:01:46,192 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
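The pair of entries above (a NodeDeleted event for /hbase/rs/... followed by RegionServerTracker "processing expiration") shows the mechanism at work: each region server registers an ephemeral znode under /hbase/rs, the master keeps a watch on it, and deletion of that node, whether by clean shutdown as here or by session expiry, is what triggers expiration handling. A bare-bones sketch of that watch pattern with the plain ZooKeeper client, assuming an illustrative quorum address and znode path:

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsEphemeralNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connection string is illustrative; the test cluster above runs its own
    // quorum on 127.0.0.1:56422.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
    String rsZnode = "/hbase/rs/3857ccc89b65,41129,1731243676346";
    // Register a one-shot watch on the region server's ephemeral node. When the
    // node disappears, ZooKeeper delivers a NodeDeleted event, which is what the
    // master's RegionServerTracker reacts to in the log above.
    zk.exists(rsZnode, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("RegionServer znode deleted: " + event.getPath());
      }
    });
    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}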
2024-11-10T13:01:46,192 DEBUG [M:0;3857ccc89b65:37415 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:01:46,192 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243676508 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243676508,5,FailOnTimeoutGroup] 2024-11-10T13:01:46,192 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243676508 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243676508,5,FailOnTimeoutGroup] 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:01:46,192 DEBUG [M:0;3857ccc89b65:37415 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:01:46,192 INFO [M:0;3857ccc89b65:37415 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:01:46,193 INFO [M:0;3857ccc89b65:37415 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:01:46,193 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:01:46,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-10T13:01:46,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:01:46,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:46,194 DEBUG [M:0;3857ccc89b65:37415 {}] zookeeper.ZKUtil(347): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:01:46,194 WARN [M:0;3857ccc89b65:37415 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:01:46,195 INFO [M:0;3857ccc89b65:37415 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/.lastflushedseqids 2024-11-10T13:01:46,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741846_1030 (size=120) 2024-11-10T13:01:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741846_1030 (size=120) 2024-11-10T13:01:46,201 INFO [M:0;3857ccc89b65:37415 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:01:46,201 INFO [M:0;3857ccc89b65:37415 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:01:46,201 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:01:46,201 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:46,201 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:46,201 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:01:46,201 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:46,201 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-10T13:01:46,202 ERROR [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData-prefix:3857ccc89b65,37415,1731243676301 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:46,202 WARN [FSHLog-0-hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData-prefix:3857ccc89b65,37415,1731243676301 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:46,202 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3857ccc89b65%2C37415%2C1731243676301:(num 1731243676429) roll requested 2024-11-10T13:01:46,202 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37415%2C1731243676301.1731243706202 2024-11-10T13:01:46,209 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,209 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,210 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,210 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,210 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,210 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243706202 2024-11-10T13:01:46,210 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:46,211 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37643,DS-c3c98701-f27c-4c50-9f43-3fd4a25c73c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-10T13:01:46,211 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 2024-11-10T13:01:46,211 WARN [IPC Server handler 1 on default port 41197 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-10T13:01:46,211 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 after 0ms 2024-11-10T13:01:46,216 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40709:40709),(127.0.0.1/127.0.0.1:36861:36861)] 2024-11-10T13:01:46,216 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 is not closed yet, will try archiving it next time 2024-11-10T13:01:46,234 DEBUG [M:0;3857ccc89b65:37415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d60d93d1a3e5435dade7cc2378df2110 is 82, key is hbase:meta,,1/info:regioninfo/1731243677149/Put/seqid=0 2024-11-10T13:01:46,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741848_1033 (size=5672) 2024-11-10T13:01:46,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741848_1033 (size=5672) 2024-11-10T13:01:46,241 INFO [M:0;3857ccc89b65:37415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d60d93d1a3e5435dade7cc2378df2110 2024-11-10T13:01:46,273 DEBUG [M:0;3857ccc89b65:37415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/050be13c39484fc8aa765c6de44b6060 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731243677649/Put/seqid=0 2024-11-10T13:01:46,282 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T13:01:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:46,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41129-0x10101f7fd0c0001, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:46,291 INFO [RS:0;3857ccc89b65:41129 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:01:46,291 INFO [RS:0;3857ccc89b65:41129 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,41129,1731243676346; zookeeper connection closed. 2024-11-10T13:01:46,302 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@59938716 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@59938716 2024-11-10T13:01:46,302 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:01:46,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741849_1034 (size=6118) 2024-11-10T13:01:46,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741849_1034 (size=6118) 2024-11-10T13:01:46,310 INFO [M:0;3857ccc89b65:37415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/050be13c39484fc8aa765c6de44b6060 2024-11-10T13:01:46,331 DEBUG [M:0;3857ccc89b65:37415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63ce3b263d144f01b4143d5baf9297a0 is 69, key is 3857ccc89b65,41129,1731243676346/rs:state/1731243676584/Put/seqid=0 2024-11-10T13:01:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741850_1035 (size=5156) 2024-11-10T13:01:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741850_1035 (size=5156) 2024-11-10T13:01:46,339 INFO [M:0;3857ccc89b65:37415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63ce3b263d144f01b4143d5baf9297a0 2024-11-10T13:01:46,363 DEBUG [M:0;3857ccc89b65:37415 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/435ba1fc555040cdae344eb79adb5b79 is 52, key is load_balancer_on/state:d/1731243677275/Put/seqid=0 2024-11-10T13:01:46,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741851_1036 (size=5056) 2024-11-10T13:01:46,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741851_1036 (size=5056) 2024-11-10T13:01:46,385 INFO [M:0;3857ccc89b65:37415 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/435ba1fc555040cdae344eb79adb5b79 2024-11-10T13:01:46,392 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d60d93d1a3e5435dade7cc2378df2110 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d60d93d1a3e5435dade7cc2378df2110 2024-11-10T13:01:46,398 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d60d93d1a3e5435dade7cc2378df2110, entries=8, sequenceid=56, filesize=5.5 K 2024-11-10T13:01:46,400 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/050be13c39484fc8aa765c6de44b6060 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/050be13c39484fc8aa765c6de44b6060 2024-11-10T13:01:46,407 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/050be13c39484fc8aa765c6de44b6060, entries=6, sequenceid=56, filesize=6.0 K 2024-11-10T13:01:46,408 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63ce3b263d144f01b4143d5baf9297a0 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/63ce3b263d144f01b4143d5baf9297a0 2024-11-10T13:01:46,414 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/63ce3b263d144f01b4143d5baf9297a0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-10T13:01:46,415 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/435ba1fc555040cdae344eb79adb5b79 as hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/435ba1fc555040cdae344eb79adb5b79 2024-11-10T13:01:46,421 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/435ba1fc555040cdae344eb79adb5b79, entries=1, sequenceid=56, filesize=4.9 K 2024-11-10T13:01:46,422 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 221ms, sequenceid=56, compaction requested=false 2024-11-10T13:01:46,425 INFO [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:46,425 DEBUG [M:0;3857ccc89b65:37415 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243706201Disabling compacts and flushes for region at 1731243706201Disabling writes for close at 1731243706201Obtaining lock to block concurrent updates at 1731243706201Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243706201Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731243706202 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731243706216 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243706217 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243706234 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243706234Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243706249 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243706272 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243706272Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243706316 (+44 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243706331 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243706331Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243706344 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243706362 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243706362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fed46a: reopening flushed file at 1731243706391 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6daf8619: reopening flushed file at 1731243706399 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40a109db: reopening flushed file at 1731243706407 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15e49e5a: reopening flushed file at 1731243706414 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 221ms, sequenceid=56, compaction requested=false at 1731243706422 (+8 ms)Writing region close event to WAL at 1731243706425 (+3 ms)Closed at 1731243706425 2024-11-10T13:01:46,430 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,430 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,430 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,431 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,431 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:01:46,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36395 is added to blk_1073741847_1031 (size=757) 2024-11-10T13:01:46,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45455 is added to blk_1073741847_1031 (size=757) 2024-11-10T13:01:46,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:46,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:47,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,857 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:01:47,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:47,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:47,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:48,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:48,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:49,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
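The repeated InvocationTargetException warnings above all bottom out in the same root cause: the DFSClient behind the FileSystem handle had already been closed when RecoverLeaseFSUtils reflectively invoked isFileClosed(), so DFSClient.checkOpen() throws IOException("Filesystem closed"). A hedged sketch of that failure mode, calling the public API directly rather than via reflection (the URI and path below are illustrative, not the ones from this test):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class ClosedClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical HDFS URI; the test above uses hdfs://localhost:35903.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path wal = new Path("/wals/example.wal");  // illustrative path
        dfs.close();                               // simulate the already-shut-down client
        try {
          dfs.isFileClosed(wal);                   // any further call now fails checkOpen()
        } catch (IOException e) {
          // Message is "Filesystem closed", matching the Caused-by entries above.
          System.out.println("expected: " + e.getMessage());
        }
      }
    }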
2024-11-10T13:01:49,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:49,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:50,212 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 after 4001ms 2024-11-10T13:01:50,213 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/WALs/3857ccc89b65,37415,1731243676301/3857ccc89b65%2C37415%2C1731243676301.1731243676429 to hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/oldWALs/3857ccc89b65%2C37415%2C1731243676301.1731243676429 2024-11-10T13:01:50,215 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/MasterData/oldWALs/3857ccc89b65%2C37415%2C1731243676301.1731243676429 to hdfs://localhost:41197/user/jenkins/test-data/b2615567-4682-a176-b2a8-4641c34a27cc/oldWALs/3857ccc89b65%2C37415%2C1731243676301.1731243676429$masterlocalwal$ 2024-11-10T13:01:50,215 INFO [M:0;3857ccc89b65:37415 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:01:50,215 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
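The lease recovery sequence logged above (attempt=0 fails immediately, attempt=1 succeeds after ~4s) follows the usual HDFS pattern of triggering recoverLease() once and then polling isFileClosed() until the NameNode reports the file closed. A minimal sketch of that pattern against the public DistributedFileSystem API; the timeout and poll interval are illustrative, and this is not the actual RecoverLeaseFSUtils code:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /**
       * Ask the NameNode to recover the lease on a file a dead writer left open,
       * then poll until the file is actually closed or the timeout expires.
       * Returns true if the file closed in time.
       */
      public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
          long timeoutMs, long pollMs) throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // recoverLease() returns true if the file is already closed / recovery is complete.
        boolean closed = dfs.recoverLease(wal);
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(pollMs);               // e.g. the ~4s gap between attempts above
          closed = dfs.isFileClosed(wal);     // cheap status check; issues no new recovery
        }
        return closed;
      }
    }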
2024-11-10T13:01:50,215 INFO [M:0;3857ccc89b65:37415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37415 2024-11-10T13:01:50,216 INFO [M:0;3857ccc89b65:37415 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:01:50,318 INFO [M:0;3857ccc89b65:37415 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:01:50,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:50,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37415-0x10101f7fd0c0000, quorum=127.0.0.1:56422, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:01:50,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@468f57bd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:50,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:50,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:50,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:50,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:50,322 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:50,322 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
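The ZKWatcher entries above with type=None, state=Closed, path=null are session-level notifications delivered when the client's ZooKeeper handle is closed during shutdown; they carry no znode path. A small illustrative watcher that separates these session events from znode events (the class and logging here are hypothetical, not HBase's ZKWatcher):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    /** Illustrative watcher: session events arrive with type=None and a null path. */
    public class SessionAwareWatcher implements Watcher {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.None) {
          // Connection/session state change (SyncConnected, Disconnected, Expired, Closed, ...).
          System.out.println("session state: " + event.getState());
        } else {
          // Znode change: created/deleted/data/children, always with a concrete path.
          System.out.println(event.getType() + " on " + event.getPath());
        }
      }
    }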
2024-11-10T13:01:50,322 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid 50db00b8-c5f1-42c8-ba90-2937de5d7276) service to localhost/127.0.0.1:41197 2024-11-10T13:01:50,322 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:50,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data3/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:50,323 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data4/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:50,323 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:50,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78dcc37b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:50,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:50,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:50,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:50,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:50,327 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
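The ResourceChecker output that follows compares the live-thread count before (155) and after (181) the test and dumps the stack of each thread it flags as potentially hanging. A hedged sketch of that kind of post-test report using only java.lang.Thread APIs; unlike the real checker it dumps every live thread rather than filtering, and the class name is illustrative:

    import java.util.Map;

    public final class ThreadLeakCheckSketch {
      /** Dump stacks of all live threads, mimicking the post-test leak report below. */
      public static void report(int countBefore) {
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        System.out.printf("Thread=%d (was %d)%n", stacks.size(), countBefore);
        for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
          System.out.println("Potentially hanging thread: " + e.getKey().getName());
          for (StackTraceElement frame : e.getValue()) {
            System.out.println("    " + frame);
          }
        }
      }
    }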
2024-11-10T13:01:50,327 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:01:50,327 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:01:50,327 WARN [BP-1340826382-172.17.0.2-1731243675520 heartbeating to localhost/127.0.0.1:41197 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1340826382-172.17.0.2-1731243675520 (Datanode Uuid bb9fc405-77e9-4a4e-ade7-ab4468b8eae9) service to localhost/127.0.0.1:41197 2024-11-10T13:01:50,327 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data1/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:50,327 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/cluster_ab5ceb79-a767-ff57-4846-5df93c62585a/data/data2/current/BP-1340826382-172.17.0.2-1731243675520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:01:50,328 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:01:50,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53a4c428{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:01:50,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:01:50,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:01:50,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:01:50,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir/,STOPPED} 2024-11-10T13:01:50,340 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:01:50,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:01:50,365 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 155) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41197 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41197 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41197 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41197 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41197 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41197 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41197 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41197 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=139 (was 34) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=8043 (was 8665) 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=139, ProcessCount=11, AvailableMemoryMB=8043 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.log.dir so I do NOT create it in target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fa461a4c-9859-1454-6fc7-21397fa2a460/hadoop.tmp.dir so I do NOT create it in target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0, deleteOnExit=true 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/test.cache.data in system properties and HBase conf 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:01:50,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:01:50,374 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:01:50,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:01:50,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:01:50,457 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:50,462 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:50,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:50,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:50,483 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:50,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:50,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232381c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:50,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48b18cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:50,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29b40997{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/java.io.tmpdir/jetty-localhost-42511-hadoop-hdfs-3_4_1-tests_jar-_-any-9443541843385490946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:01:50,620 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37ea919c{HTTP/1.1, (http/1.1)}{localhost:42511} 2024-11-10T13:01:50,620 INFO [Time-limited test {}] server.Server(415): Started @183591ms 2024-11-10T13:01:50,634 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:01:50,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:50,700 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:50,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:50,701 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:50,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:01:50,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fa18b90{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:50,705 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20ad0bae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:50,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20e4ef1d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/java.io.tmpdir/jetty-localhost-39943-hadoop-hdfs-3_4_1-tests_jar-_-any-6060528997834176386/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:50,836 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fc56883{HTTP/1.1, (http/1.1)}{localhost:39943} 2024-11-10T13:01:50,836 INFO [Time-limited test {}] server.Server(415): Started @183807ms 2024-11-10T13:01:50,838 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:01:50,871 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:01:50,874 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:01:50,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:01:50,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:01:50,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:01:50,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fa194e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:01:50,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca63b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:01:50,943 WARN [Thread-1639 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data1/current/BP-1300434893-172.17.0.2-1731243710403/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:50,944 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data2/current/BP-1300434893-172.17.0.2-1731243710403/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:50,968 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:01:50,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x76317ea98e7b5b3 with lease ID 0xf0d97bde0e89d75e: Processing first storage report for DS-82646d9b-c514-43c8-8774-89fc9aaa4309 from datanode DatanodeRegistration(127.0.0.1:46011, datanodeUuid=bc60de89-34db-4f29-b074-a5cd4c9ae460, infoPort=46499, infoSecurePort=0, ipcPort=39663, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403) 2024-11-10T13:01:50,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76317ea98e7b5b3 with lease ID 0xf0d97bde0e89d75e: from storage DS-82646d9b-c514-43c8-8774-89fc9aaa4309 node DatanodeRegistration(127.0.0.1:46011, datanodeUuid=bc60de89-34db-4f29-b074-a5cd4c9ae460, infoPort=46499, infoSecurePort=0, ipcPort=39663, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:50,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x76317ea98e7b5b3 with lease ID 0xf0d97bde0e89d75e: Processing first storage report for DS-2c5d58f1-4ac4-4d48-983e-d8e806a1efb7 from datanode DatanodeRegistration(127.0.0.1:46011, datanodeUuid=bc60de89-34db-4f29-b074-a5cd4c9ae460, infoPort=46499, infoSecurePort=0, ipcPort=39663, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403) 2024-11-10T13:01:50,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76317ea98e7b5b3 with lease ID 0xf0d97bde0e89d75e: from storage DS-2c5d58f1-4ac4-4d48-983e-d8e806a1efb7 node DatanodeRegistration(127.0.0.1:46011, datanodeUuid=bc60de89-34db-4f29-b074-a5cd4c9ae460, infoPort=46499, infoSecurePort=0, ipcPort=39663, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:50,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:50,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:01:50,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bfe295d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/java.io.tmpdir/jetty-localhost-44841-hadoop-hdfs-3_4_1-tests_jar-_-any-5979696153012125707/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:01:50,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22a4ff4e{HTTP/1.1, (http/1.1)}{localhost:44841} 2024-11-10T13:01:50,998 INFO [Time-limited test {}] server.Server(415): Started @183969ms 2024-11-10T13:01:51,000 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:01:51,122 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data3/current/BP-1300434893-172.17.0.2-1731243710403/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:51,123 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data4/current/BP-1300434893-172.17.0.2-1731243710403/current, will proceed with Du for space computation calculation, 2024-11-10T13:01:51,157 WARN [Thread-1654 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:01:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6395d2fa0764b9d8 with lease ID 0xf0d97bde0e89d75f: Processing first storage report for DS-786eab40-d579-4b0c-9b03-eb1e59f0da42 from datanode DatanodeRegistration(127.0.0.1:37885, datanodeUuid=abdb9076-8bca-4f9d-a844-e54e13f1b783, infoPort=44813, infoSecurePort=0, ipcPort=44591, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403) 2024-11-10T13:01:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6395d2fa0764b9d8 with lease ID 0xf0d97bde0e89d75f: from storage DS-786eab40-d579-4b0c-9b03-eb1e59f0da42 node DatanodeRegistration(127.0.0.1:37885, datanodeUuid=abdb9076-8bca-4f9d-a844-e54e13f1b783, infoPort=44813, infoSecurePort=0, ipcPort=44591, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6395d2fa0764b9d8 with lease ID 0xf0d97bde0e89d75f: Processing first storage report for DS-6aada3c9-0f4e-4947-bcd7-55f0e043c8fd from datanode DatanodeRegistration(127.0.0.1:37885, datanodeUuid=abdb9076-8bca-4f9d-a844-e54e13f1b783, infoPort=44813, infoSecurePort=0, ipcPort=44591, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403) 2024-11-10T13:01:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6395d2fa0764b9d8 with lease ID 0xf0d97bde0e89d75f: from storage DS-6aada3c9-0f4e-4947-bcd7-55f0e043c8fd node DatanodeRegistration(127.0.0.1:37885, datanodeUuid=abdb9076-8bca-4f9d-a844-e54e13f1b783, infoPort=44813, infoSecurePort=0, ipcPort=44591, storageInfo=lv=-57;cid=testClusterID;nsid=1273101240;c=1731243710403), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:01:51,234 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9 2024-11-10T13:01:51,236 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/zookeeper_0, clientPort=57635, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:01:51,237 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57635 2024-11-10T13:01:51,238 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,239 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:01:51,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:01:51,248 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce with version=8 2024-11-10T13:01:51,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:01:51,250 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:01:51,250 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:01:51,251 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33021 2024-11-10T13:01:51,252 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33021 connecting to ZooKeeper ensemble=127.0.0.1:57635 2024-11-10T13:01:51,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:330210x0, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:01:51,262 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33021-0x10101f885930000 connected 2024-11-10T13:01:51,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,281 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:51,283 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce, hbase.cluster.distributed=false 2024-11-10T13:01:51,285 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:01:51,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33021 2024-11-10T13:01:51,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33021 2024-11-10T13:01:51,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33021 2024-11-10T13:01:51,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33021 2024-11-10T13:01:51,287 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33021 2024-11-10T13:01:51,302 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:01:51,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,302 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:01:51,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:01:51,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:01:51,303 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:01:51,303 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:01:51,303 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43055 2024-11-10T13:01:51,305 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43055 connecting to ZooKeeper ensemble=127.0.0.1:57635 2024-11-10T13:01:51,305 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430550x0, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:01:51,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:430550x0, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:01:51,311 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43055-0x10101f885930001 connected 2024-11-10T13:01:51,311 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:01:51,312 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:01:51,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:01:51,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:01:51,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43055 2024-11-10T13:01:51,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43055 2024-11-10T13:01:51,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43055 2024-11-10T13:01:51,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43055 2024-11-10T13:01:51,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43055 2024-11-10T13:01:51,327 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:33021 2024-11-10T13:01:51,327 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:51,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:51,329 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:01:51,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,331 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:01:51,331 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,33021,1731243711250 from backup master directory 2024-11-10T13:01:51,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:51,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:01:51,338 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-10T13:01:51,338 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,341 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/hbase.id] with ID: 8625c0cd-3193-47b1-9659-832bb8b7a8fe 2024-11-10T13:01:51,341 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/.tmp/hbase.id 2024-11-10T13:01:51,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:01:51,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:01:51,347 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/.tmp/hbase.id]:[hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/hbase.id] 2024-11-10T13:01:51,358 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:51,358 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:01:51,359 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-10T13:01:51,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:01:51,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:01:51,375 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:01:51,376 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:01:51,376 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:01:51,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:01:51,384 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store 2024-11-10T13:01:51,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:01:51,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:01:51,397 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:01:51,397 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:01:51,397 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243711397Disabling compacts and flushes for region at 1731243711397Disabling writes for close at 1731243711397Writing region close event to WAL at 1731243711397Closed at 1731243711397 2024-11-10T13:01:51,398 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/.initializing 2024-11-10T13:01:51,398 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/WALs/3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,401 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C33021%2C1731243711250, suffix=, logDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/WALs/3857ccc89b65,33021,1731243711250, archiveDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/oldWALs, maxLogs=10 2024-11-10T13:01:51,401 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C33021%2C1731243711250.1731243711401 2024-11-10T13:01:51,409 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/WALs/3857ccc89b65,33021,1731243711250/3857ccc89b65%2C33021%2C1731243711250.1731243711401 2024-11-10T13:01:51,410 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44813:44813),(127.0.0.1/127.0.0.1:46499:46499)] 2024-11-10T13:01:51,411 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:51,412 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:51,412 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,412 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:01:51,415 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:51,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:01:51,417 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:51,418 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,419 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:01:51,419 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:51,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,422 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:01:51,422 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,423 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:51,423 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,424 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,424 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,426 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,427 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:01:51,428 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:01:51,430 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:51,431 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873114, jitterRate=0.11022242903709412}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:01:51,432 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243711412Initializing all the Stores at 1731243711413 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243711413Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243711413Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243711413Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243711413Cleaning up temporary data from old regions at 1731243711426 (+13 ms)Region opened successfully at 1731243711432 (+6 ms) 2024-11-10T13:01:51,432 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:01:51,436 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40d46aed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:01:51,437 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:01:51,437 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:01:51,437 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:01:51,437 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:01:51,438 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:01:51,438 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:01:51,438 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:01:51,440 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:01:51,441 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:01:51,442 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:01:51,442 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:01:51,443 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:01:51,444 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:01:51,444 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:01:51,445 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:01:51,448 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:01:51,448 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:01:51,449 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:01:51,451 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:01:51,452 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:01:51,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:51,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:01:51,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,454 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,33021,1731243711250, sessionid=0x10101f885930000, setting cluster-up flag (Was=false) 2024-11-10T13:01:51,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,464 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:01:51,465 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,472 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:01:51,473 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,33021,1731243711250 2024-11-10T13:01:51,474 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:01:51,476 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:51,476 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:01:51,476 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T13:01:51,476 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,33021,1731243711250 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:01:51,477 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:01:51,481 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243741481 2024-11-10T13:01:51,481 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:01:51,481 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:01:51,481 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:01:51,482 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:01:51,482 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
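The CompletedProcedureCleaner entry above carries timeout=30000 and timestamp=1731243741481; the timestamp is simply the epoch-millisecond time of the log line (13:01:51,481, i.e. 1731243711481 under the apparent UTC clock of this test JVM) plus the 30-second timeout. A small JDK-only sketch of that deadline arithmetic, with the UTC assumption made explicit:

    import java.time.Instant;

    public final class TimeoutDeadlineSketch {
      public static void main(String[] args) {
        long submittedAt = 1_731_243_711_481L; // 2024-11-10T13:01:51.481Z, the time of the ADDED log line (assuming UTC)
        long timeoutMs   = 30_000L;            // timeout=30000 from the log entry
        long deadline    = submittedAt + timeoutMs;
        // Prints 1731243741481, matching timestamp=1731243741481 in the log.
        System.out.println(deadline);
        // Prints 2024-11-10T13:02:21.481Z, i.e. 30 seconds after the entry was scheduled.
        System.out.println(Instant.ofEpochMilli(deadline));
      }
    }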
2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:01:51,482 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:01:51,483 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,483 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:01:51,483 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:01:51,483 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:01:51,483 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243711483,5,FailOnTimeoutGroup] 2024-11-10T13:01:51,484 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243711483,5,FailOnTimeoutGroup] 2024-11-10T13:01:51,484 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,484 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:01:51,484 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,484 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:01:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:01:51,492 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:01:51,493 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce 2024-11-10T13:01:51,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:01:51,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:01:51,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:51,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:01:51,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:01:51,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:51,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:01:51,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:01:51,503 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:51,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:01:51,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:01:51,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:51,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:01:51,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:01:51,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:51,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:51,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:01:51,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740 2024-11-10T13:01:51,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740 2024-11-10T13:01:51,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:01:51,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:01:51,510 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:01:51,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:01:51,512 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:51,513 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721136, jitterRate=-0.0830284059047699}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243711500Initializing all the Stores at 1731243711500Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243711500Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243711500Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243711500Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243711500Cleaning up temporary data from old regions at 1731243711509 (+9 ms)Region opened successfully at 1731243711514 (+5 ms) 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:01:51,514 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:01:51,514 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:01:51,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243711514Disabling compacts and flushes for region at 
1731243711514Disabling writes for close at 1731243711514Writing region close event to WAL at 1731243711514Closed at 1731243711514 2024-11-10T13:01:51,516 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:51,516 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:01:51,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:01:51,517 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(746): ClusterId : 8625c0cd-3193-47b1-9659-832bb8b7a8fe 2024-11-10T13:01:51,517 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:01:51,517 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:01:51,519 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:01:51,521 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:01:51,521 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:01:51,523 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:01:51,523 DEBUG [RS:0;3857ccc89b65:43055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630e4b30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:01:51,535 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:43055 2024-11-10T13:01:51,535 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:01:51,535 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:01:51,535 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(832): About to register with Master. 
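Both FlushLargeStoresPolicy records in this log (32.0 M for master:store, 16.0 M for hbase:meta) follow the fallback spelled out in the message: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound is the region's memstore flush size divided by its number of column families, and both tables here have four families. A minimal sketch of that division; the 64 MiB flush size for hbase:meta is inferred from the logged 16 MiB result rather than printed anywhere in the log:

    public final class FlushLowerBoundSketch {
      // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set:
      // the region's memstore flush size divided by its number of column families.
      static long lowerBound(long memstoreFlushSizeBytes, int numFamilies) {
        return memstoreFlushSizeBytes / numFamilies;
      }

      public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MiB) and 4 families
        // -> 33554432 (32 MiB), matching flushSizeLowerBound=33554432 in the log.
        System.out.println(lowerBound(134_217_728L, 4));
        // hbase:meta: 4 families and a logged lower bound of 16777216 (16 MiB),
        // which corresponds to an inferred 64 MiB flush size.
        System.out.println(lowerBound(67_108_864L, 4));
      }
    }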
2024-11-10T13:01:51,536 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,33021,1731243711250 with port=43055, startcode=1731243711302 2024-11-10T13:01:51,536 DEBUG [RS:0;3857ccc89b65:43055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:01:51,538 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54717, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:01:51,538 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33021 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,538 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33021 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,540 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce 2024-11-10T13:01:51,540 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37879 2024-11-10T13:01:51,540 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:01:51,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:01:51,543 DEBUG [RS:0;3857ccc89b65:43055 {}] zookeeper.ZKUtil(111): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,543 WARN [RS:0;3857ccc89b65:43055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:01:51,543 INFO [RS:0;3857ccc89b65:43055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:51,543 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,544 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,43055,1731243711302] 2024-11-10T13:01:51,546 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:01:51,548 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:01:51,548 INFO [RS:0;3857ccc89b65:43055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:01:51,548 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
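The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Those two numbers are consistent with the usual defaults of 40% of the heap for the global limit and 95% of the limit for the low mark, which would put the test heap at roughly 2.2 GB; both fractions and the heap size are assumptions here, only 880 M and 836 M come from the log. A JDK-only sketch of that arithmetic:

    public final class MemStoreLimitSketch {
      public static void main(String[] args) {
        // Assumed defaults: hbase.regionserver.global.memstore.size = 0.4 and
        // hbase.regionserver.global.memstore.size.lower.limit = 0.95.
        long heapMb = 2200;                               // inferred, not printed in the log
        long globalLimitMb = (long) (heapMb * 0.40);      // -> 880, matches globalMemStoreLimit=880 M
        long lowMarkMb = (long) (globalLimitMb * 0.95);   // -> 836, matches globalMemStoreLimitLowMark=836 M
        System.out.println(globalLimitMb + " / " + lowMarkMb);
      }
    }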
2024-11-10T13:01:51,548 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:01:51,549 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:01:51,549 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,549 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:01:51,550 DEBUG [RS:0;3857ccc89b65:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
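Each "Starting executor service" record above describes a fixed-size pool (corePoolSize equal to maxPoolSize, e.g. RS_OPEN_REGION at 1/1 and RS_SNAPSHOT_OPERATIONS at 3/3), and an earlier record notes allowCoreThreadTimeOut=true for the procedure dispatcher. Purely as a shape comparison, a JDK-only sketch of such a pool follows; the names and sizes are illustrative and this is not the HBase ExecutorService implementation:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class ExecutorPoolSketch {
      // Fixed-size pool in the spirit of the RS_* / MASTER_* entries above:
      // core == max, unbounded queue, idle core threads allowed to time out.
      static ThreadPoolExecutor newPool(String name, int size) {
        AtomicInteger seq = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            size, size, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            r -> new Thread(r, name + "-" + seq.getAndIncrement()));
        pool.allowCoreThreadTimeOut(true); // mirrors allowCoreThreadTimeOut=true seen earlier in the log
        return pool;
      }

      public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1); // corePoolSize=1, maxPoolSize=1
        openRegion.execute(() -> System.out.println("open region task"));
        openRegion.shutdown();
        openRegion.awaitTermination(5, TimeUnit.SECONDS);
      }
    }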
2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,552 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43055,1731243711302-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:01:51,566 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:01:51,567 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,43055,1731243711302-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,567 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,567 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.Replication(171): 3857ccc89b65,43055,1731243711302 started 2024-11-10T13:01:51,581 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:51,581 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,43055,1731243711302, RpcServer on 3857ccc89b65/172.17.0.2:43055, sessionid=0x10101f885930001 2024-11-10T13:01:51,581 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:01:51,581 DEBUG [RS:0;3857ccc89b65:43055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,581 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,43055,1731243711302' 2024-11-10T13:01:51,581 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,43055,1731243711302' 2024-11-10T13:01:51,582 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:01:51,583 DEBUG 
[RS:0;3857ccc89b65:43055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:01:51,583 DEBUG [RS:0;3857ccc89b65:43055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:01:51,583 INFO [RS:0;3857ccc89b65:43055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:01:51,583 INFO [RS:0;3857ccc89b65:43055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:01:51,669 WARN [3857ccc89b65:33021 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:01:51,685 INFO [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C43055%2C1731243711302, suffix=, logDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302, archiveDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs, maxLogs=32 2024-11-10T13:01:51,686 INFO [RS:0;3857ccc89b65:43055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43055%2C1731243711302.1731243711686 2024-11-10T13:01:51,693 INFO [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243711686 2024-11-10T13:01:51,700 DEBUG [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46499:46499),(127.0.0.1/127.0.0.1:44813:44813)] 2024-11-10T13:01:51,919 DEBUG [3857ccc89b65:33021 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:01:51,920 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:51,921 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,43055,1731243711302, state=OPENING 2024-11-10T13:01:51,923 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:01:51,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:01:51,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:51,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:51,925 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:01:51,925 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,43055,1731243711302}] 2024-11-10T13:01:51,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:51,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:52,079 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:01:52,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52773, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:01:52,084 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:01:52,084 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:01:52,086 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C43055%2C1731243711302.meta, suffix=.meta, logDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302, archiveDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs, maxLogs=32 2024-11-10T13:01:52,087 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43055%2C1731243711302.meta.1731243712086.meta 2024-11-10T13:01:52,091 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.meta.1731243712086.meta 2024-11-10T13:01:52,092 DEBUG 
[RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46499:46499),(127.0.0.1/127.0.0.1:44813:44813)] 2024-11-10T13:01:52,095 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:01:52,096 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:01:52,096 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:01:52,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:01:52,099 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:01:52,099 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:52,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:01:52,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:01:52,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:52,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:01:52,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:01:52,101 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:52,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:01:52,102 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:01:52,102 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:01:52,103 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:01:52,103 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740 2024-11-10T13:01:52,104 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740 2024-11-10T13:01:52,105 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:01:52,105 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:01:52,106 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-10T13:01:52,107 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:01:52,108 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877742, jitterRate=0.11610788106918335}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:01:52,108 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:01:52,108 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243712096Writing region info on filesystem at 1731243712097 (+1 ms)Initializing all the Stores at 1731243712097Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243712097Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243712098 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243712098Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243712098Cleaning up temporary data from old regions at 1731243712105 (+7 ms)Running coprocessor post-open hooks at 1731243712108 (+3 ms)Region opened successfully at 1731243712108 2024-11-10T13:01:52,109 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243712078 2024-11-10T13:01:52,112 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:01:52,112 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:01:52,113 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:52,114 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,43055,1731243711302, state=OPEN 2024-11-10T13:01:52,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:01:52,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:01:52,119 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:52,119 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:52,119 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:01:52,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:01:52,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,43055,1731243711302 in 194 msec 2024-11-10T13:01:52,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:01:52,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-10T13:01:52,125 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:01:52,125 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:01:52,126 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:01:52,126 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,43055,1731243711302, seqNum=-1] 2024-11-10T13:01:52,127 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:01:52,128 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36691, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:01:52,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-10T13:01:52,134 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243712134, completionTime=-1 2024-11-10T13:01:52,134 INFO 
[master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:01:52,134 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243772136 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243832136 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:33021, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,136 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,137 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,138 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.802sec 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:01:52,140 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:01:52,142 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:01:52,142 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:01:52,142 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,33021,1731243711250-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:01:52,217 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5daea1f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:52,217 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,33021,-1 for getting cluster id 2024-11-10T13:01:52,217 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:01:52,220 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8625c0cd-3193-47b1-9659-832bb8b7a8fe' 2024-11-10T13:01:52,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:01:52,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8625c0cd-3193-47b1-9659-832bb8b7a8fe" 2024-11-10T13:01:52,221 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@637f45ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:52,222 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,33021,-1] 2024-11-10T13:01:52,222 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:01:52,222 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:01:52,224 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49862, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:01:52,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc2e521, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:01:52,226 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:01:52,227 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,43055,1731243711302, seqNum=-1] 2024-11-10T13:01:52,227 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:01:52,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46836, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:01:52,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,33021,1731243711250 2024-11-10T13:01:52,230 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:01:52,233 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:01:52,233 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:01:52,234 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3857ccc89b65,33021,1731243711250 2024-11-10T13:01:52,234 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@59d28c86 2024-11-10T13:01:52,234 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:01:52,235 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49864, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:01:52,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T13:01:52,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-10T13:01:52,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:01:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:01:52,240 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:01:52,240 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-10T13:01:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:01:52,241 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:01:52,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741835_1011 (size=405) 2024-11-10T13:01:52,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741835_1011 (size=405) 2024-11-10T13:01:52,252 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 66fc512b5434e39aaeec8e55367a0047, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce 2024-11-10T13:01:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741836_1012 (size=88) 2024-11-10T13:01:52,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46011 is added to blk_1073741836_1012 (size=88) 2024-11-10T13:01:52,264 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:52,265 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 66fc512b5434e39aaeec8e55367a0047, disabling compactions & flushes 2024-11-10T13:01:52,265 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,265 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,265 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. after waiting 0 ms 2024-11-10T13:01:52,265 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,265 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,265 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 66fc512b5434e39aaeec8e55367a0047: Waiting for close lock at 1731243712265Disabling compacts and flushes for region at 1731243712265Disabling writes for close at 1731243712265Writing region close event to WAL at 1731243712265Closed at 1731243712265 2024-11-10T13:01:52,266 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:01:52,267 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731243712266"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243712266"}]},"ts":"1731243712266"} 2024-11-10T13:01:52,269 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-10T13:01:52,270 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:01:52,270 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243712270"}]},"ts":"1731243712270"} 2024-11-10T13:01:52,273 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-10T13:01:52,273 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=66fc512b5434e39aaeec8e55367a0047, ASSIGN}] 2024-11-10T13:01:52,274 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=66fc512b5434e39aaeec8e55367a0047, ASSIGN 2024-11-10T13:01:52,276 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=66fc512b5434e39aaeec8e55367a0047, ASSIGN; state=OFFLINE, location=3857ccc89b65,43055,1731243711302; forceNewPlan=false, retain=false 2024-11-10T13:01:52,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-10T13:01:52,427 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=66fc512b5434e39aaeec8e55367a0047, regionState=OPENING, regionLocation=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:52,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=66fc512b5434e39aaeec8e55367a0047, ASSIGN because future has completed 2024-11-10T13:01:52,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 66fc512b5434e39aaeec8e55367a0047, server=3857ccc89b65,43055,1731243711302}] 2024-11-10T13:01:52,588 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 
2024-11-10T13:01:52,588 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 66fc512b5434e39aaeec8e55367a0047, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:01:52,589 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,589 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:01:52,589 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,589 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,590 INFO [StoreOpener-66fc512b5434e39aaeec8e55367a0047-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,591 INFO [StoreOpener-66fc512b5434e39aaeec8e55367a0047-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66fc512b5434e39aaeec8e55367a0047 columnFamilyName info 2024-11-10T13:01:52,591 DEBUG [StoreOpener-66fc512b5434e39aaeec8e55367a0047-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:01:52,592 INFO [StoreOpener-66fc512b5434e39aaeec8e55367a0047-1 {}] regionserver.HStore(327): Store=66fc512b5434e39aaeec8e55367a0047/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:01:52,592 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,593 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,593 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,593 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,593 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,595 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,596 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:01:52,597 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 66fc512b5434e39aaeec8e55367a0047; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825749, jitterRate=0.04999533295631409}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:01:52,597 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 66fc512b5434e39aaeec8e55367a0047 2024-11-10T13:01:52,597 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 66fc512b5434e39aaeec8e55367a0047: Running coprocessor pre-open hook at 1731243712589Writing region info on filesystem at 1731243712589Initializing all the Stores at 1731243712590 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243712590Cleaning up temporary data from old regions at 1731243712593 (+3 ms)Running coprocessor post-open hooks at 1731243712597 (+4 ms)Region opened successfully at 1731243712597 2024-11-10T13:01:52,598 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047., pid=6, masterSystemTime=1731243712584 2024-11-10T13:01:52,601 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,601 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:01:52,602 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=66fc512b5434e39aaeec8e55367a0047, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,43055,1731243711302 2024-11-10T13:01:52,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 66fc512b5434e39aaeec8e55367a0047, server=3857ccc89b65,43055,1731243711302 because future has completed 2024-11-10T13:01:52,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:01:52,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 66fc512b5434e39aaeec8e55367a0047, server=3857ccc89b65,43055,1731243711302 in 174 msec 2024-11-10T13:01:52,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:01:52,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=66fc512b5434e39aaeec8e55367a0047, ASSIGN in 335 msec 2024-11-10T13:01:52,611 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:01:52,612 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243712611"}]},"ts":"1731243712611"} 2024-11-10T13:01:52,614 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-10T13:01:52,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:01:52,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 379 msec 2024-11-10T13:01:52,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:52,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:53,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:53,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:54,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:54,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:55,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:55,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:56,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:56,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:57,598 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:01:57,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:01:57,628 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T13:01:57,629 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-10T13:01:57,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:57,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:58,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:58,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:59,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:01:59,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:00,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:00,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:01,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:01,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:02,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:02,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T13:02:02,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:02:02,313 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T13:02:02,313 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-10T13:02:02,316 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:02,316 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 
2024-11-10T13:02:02,319 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047., hostname=3857ccc89b65,43055,1731243711302, seqNum=2] 2024-11-10T13:02:02,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:02,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:02,334 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-10T13:02:02,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-10T13:02:02,335 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T13:02:02,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T13:02:02,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-10T13:02:02,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 
2024-11-10T13:02:02,498 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 66fc512b5434e39aaeec8e55367a0047 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-10T13:02:02,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/c033c45a2d604d39b200c365187d8246 is 1080, key is row0001/info:/1731243722320/Put/seqid=0 2024-11-10T13:02:02,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741837_1013 (size=6033) 2024-11-10T13:02:02,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741837_1013 (size=6033) 2024-11-10T13:02:02,520 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/c033c45a2d604d39b200c365187d8246 2024-11-10T13:02:02,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/c033c45a2d604d39b200c365187d8246 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246 2024-11-10T13:02:02,532 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246, entries=1, sequenceid=5, filesize=5.9 K 2024-11-10T13:02:02,533 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 36ms, sequenceid=5, compaction requested=false 2024-11-10T13:02:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 66fc512b5434e39aaeec8e55367a0047: 2024-11-10T13:02:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 
2024-11-10T13:02:02,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-10T13:02:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-10T13:02:02,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-10T13:02:02,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec
2024-11-10T13:02:02,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 214 msec
2024-11-10T13:02:02,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T13:02:02,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
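This lease-recovery WARN keeps recurring for the same two WAL files (it is still firing at 13:02:15 further down), and every occurrence fails at the same frame: the reflective call from RecoverLeaseFSUtils.isFileClosed into DistributedFileSystem.isFileClosed throws java.io.IOException: Filesystem closed because the DFSClient behind that FileSystem handle has already been shut down; the reflection is why the log wraps the failure in an InvocationTargetException. The short Java sketch below is not HBase's RecoverLeaseFSUtils code, and the namenode URI, path, and retry count are illustrative placeholders; it only demonstrates why a retry loop like this cannot succeed once the client is closed.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ClosedClientLeaseProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode URI; the WARNs above talk to hdfs://localhost:35903.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:35903"), conf);
    // Placeholder path standing in for the long .../WALs/... file in the log.
    Path wal = new Path("/user/jenkins/wals/example-wal");

    dfs.close(); // reproduce the condition in the log: the DFS client is already closed

    for (int attempt = 1; attempt <= 3; attempt++) {
      try {
        // RecoverLeaseFSUtils invokes this reflectively, which is why the log shows an
        // InvocationTargetException wrapper; called directly it throws the cause itself.
        boolean closed = dfs.isFileClosed(wal);
        System.out.println("attempt " + attempt + ": isFileClosed=" + closed);
        break;
      } catch (IOException e) {
        // Prints "Filesystem closed" on every attempt; waiting and retrying cannot
        // make progress once the client is gone, matching the repeating WARNs above.
        System.out.println("attempt " + attempt + ": " + e.getMessage());
        Thread.sleep(1000L);
      }
    }
  }
}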
2024-11-10T13:02:12,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-10T13:02:12,413 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-10T13:02:12,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T13:02:12,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T13:02:12,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-10T13:02:12,418 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-10T13:02:12,419 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-10T13:02:12,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-10T13:02:12,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-10T13:02:12,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:12,573 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 66fc512b5434e39aaeec8e55367a0047 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-10T13:02:12,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/f6f6b24cfa47432d8d4222076dd0c769 is 1080, key is row0002/info:/1731243732414/Put/seqid=0
2024-11-10T13:02:12,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741838_1014 (size=6033)
2024-11-10T13:02:12,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741838_1014 (size=6033)
2024-11-10T13:02:12,583 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/f6f6b24cfa47432d8d4222076dd0c769
2024-11-10T13:02:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/f6f6b24cfa47432d8d4222076dd0c769 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769
2024-11-10T13:02:12,594 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769, entries=1, sequenceid=9, filesize=5.9 K
2024-11-10T13:02:12,595 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 22ms, sequenceid=9, compaction requested=false
2024-11-10T13:02:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 66fc512b5434e39aaeec8e55367a0047:
2024-11-10T13:02:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-10T13:02:12,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-10T13:02:12,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-10T13:02:12,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-11-10T13:02:12,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec
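The 13:02:12,416 entry above shows this second flush arriving from the test client (Client=jenkins//172.17.0.2) and running on the master as FlushTableProcedure pid=9 with a single FlushRegionProcedure pid=10 underneath it. The test's own code is not part of this log; as a hedged illustration only, a client-side table flush like the one reported here is typically issued through the HBase Admin API roughly as follows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableClient {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath that points at the cluster under test.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A single flush request to the master; it runs as a FlushTableProcedure that fans
      // out one FlushRegionProcedure per region, matching the pid=9 / pid=10 entries above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}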
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:15,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:15,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:16,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:16,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T13:02:16,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:16,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta after 68032ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor193.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T13:02:17,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:17,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:18,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:18,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:19,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:19,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:20,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:20,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:21,233 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T13:02:21,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:21,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:22,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-10T13:02:22,443 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T13:02:22,449 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43055%2C1731243711302.1731243742448 2024-11-10T13:02:22,454 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:22,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:22,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:22,454 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:22,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:22,454 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243711686 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243742448 2024-11-10T13:02:22,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741833_1009 (size=5546) 2024-11-10T13:02:22,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741833_1009 (size=5546) 2024-11-10T13:02:22,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46499:46499),(127.0.0.1/127.0.0.1:44813:44813)] 2024-11-10T13:02:22,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-10T13:02:22,463 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-10T13:02:22,464 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-10T13:02:22,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-10T13:02:22,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-10T13:02:22,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:02:22,618 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 66fc512b5434e39aaeec8e55367a0047 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-10T13:02:22,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/dd698e9b304b4e999ae0c98f4d441fd0 is 1080, key is row0003/info:/1731243742445/Put/seqid=0 2024-11-10T13:02:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741840_1016 (size=6033) 2024-11-10T13:02:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741840_1016 (size=6033) 2024-11-10T13:02:22,629 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/dd698e9b304b4e999ae0c98f4d441fd0 2024-11-10T13:02:22,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/dd698e9b304b4e999ae0c98f4d441fd0 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0 2024-11-10T13:02:22,642 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0, entries=1, sequenceid=13, filesize=5.9 K 2024-11-10T13:02:22,643 INFO 
[RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 25ms, sequenceid=13, compaction requested=true 2024-11-10T13:02:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 66fc512b5434e39aaeec8e55367a0047: 2024-11-10T13:02:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:02:22,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-10T13:02:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-10T13:02:22,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-10T13:02:22,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-10T13:02:22,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec 2024-11-10T13:02:22,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:22,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:23,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-10T13:02:32,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-10T13:02:32,523 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-10T13:02:32,523 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T13:02:32,524 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T13:02:32,524 DEBUG [Time-limited test {}] regionserver.HStore(1541): 66fc512b5434e39aaeec8e55367a0047/info is initiating minor compaction (all files)
2024-11-10T13:02:32,524 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-10T13:02:32,524 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T13:02:32,524 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 66fc512b5434e39aaeec8e55367a0047/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:32,525 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0] into tmpdir=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp, totalSize=17.7 K
2024-11-10T13:02:32,525 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c033c45a2d604d39b200c365187d8246, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731243722320
2024-11-10T13:02:32,525 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f6f6b24cfa47432d8d4222076dd0c769, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731243732414
2024-11-10T13:02:32,526 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dd698e9b304b4e999ae0c98f4d441fd0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731243742445
2024-11-10T13:02:32,536 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 66fc512b5434e39aaeec8e55367a0047#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T13:02:32,537 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/0b6620edf8694fc2a914ccdc25c8d63f is 1080, key is row0001/info:/1731243722320/Put/seqid=0
2024-11-10T13:02:32,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741841_1017 (size=8296)
2024-11-10T13:02:32,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741841_1017 (size=8296)
2024-11-10T13:02:32,547 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/0b6620edf8694fc2a914ccdc25c8d63f as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/0b6620edf8694fc2a914ccdc25c8d63f
2024-11-10T13:02:32,553 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 66fc512b5434e39aaeec8e55367a0047/info of 66fc512b5434e39aaeec8e55367a0047 into 0b6620edf8694fc2a914ccdc25c8d63f(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-10T13:02:32,553 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 66fc512b5434e39aaeec8e55367a0047:
2024-11-10T13:02:32,556 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43055%2C1731243711302.1731243752556
2024-11-10T13:02:32,561 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:02:32,561 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:02:32,561 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:02:32,561 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:02:32,562 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-10T13:02:32,562 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243742448 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243752556
2024-11-10T13:02:32,562 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44813:44813),(127.0.0.1/127.0.0.1:46499:46499)]
2024-11-10T13:02:32,562 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243742448 is not closed yet, will try archiving it next time
2024-11-10T13:02:32,563 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243711686 to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs/3857ccc89b65%2C43055%2C1731243711302.1731243711686
2024-11-10T13:02:32,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741839_1015 (size=2520)
2024-11-10T13:02:32,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T13:02:32,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741839_1015 (size=2520)
2024-11-10T13:02:32,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-10T13:02:32,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-10T13:02:32,566 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-10T13:02:32,567 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-10T13:02:32,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-10T13:02:32,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-10T13:02:32,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:32,720 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 66fc512b5434e39aaeec8e55367a0047 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-10T13:02:32,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/92b955ef7ff84fa38c4ce9e3a3e7332f is 1080, key is row0000/info:/1731243752555/Put/seqid=0
2024-11-10T13:02:32,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741843_1019 (size=6033)
2024-11-10T13:02:32,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741843_1019 (size=6033)
2024-11-10T13:02:32,730 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/92b955ef7ff84fa38c4ce9e3a3e7332f
2024-11-10T13:02:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/92b955ef7ff84fa38c4ce9e3a3e7332f as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/92b955ef7ff84fa38c4ce9e3a3e7332f
2024-11-10T13:02:32,741 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/92b955ef7ff84fa38c4ce9e3a3e7332f, entries=1, sequenceid=18, filesize=5.9 K
2024-11-10T13:02:32,742 INFO [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 22ms, sequenceid=18, compaction requested=false
2024-11-10T13:02:32,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 66fc512b5434e39aaeec8e55367a0047:
2024-11-10T13:02:32,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:32,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-10T13:02:32,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-10T13:02:32,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-10T13:02:32,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-10T13:02:32,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-11-10T13:02:32,759 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-10T13:02:32,759 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-10T13:02:32,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-10T13:02:32,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-10T13:02:34,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:35,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:35,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:37,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:37,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:37,589 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 66fc512b5434e39aaeec8e55367a0047, had cached 0 bytes from a total of 14329 2024-11-10T13:02:38,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:38,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:39,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:39,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:40,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:40,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:41,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:41,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:42,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:42,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33021 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-10T13:02:42,633 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-10T13:02:42,636 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C43055%2C1731243711302.1731243762636 2024-11-10T13:02:42,642 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,642 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,642 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,642 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,642 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,642 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243752556 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243762636 2024-11-10T13:02:42,643 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46499:46499),(127.0.0.1/127.0.0.1:44813:44813)] 2024-11-10T13:02:42,643 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243752556 is not closed yet, will try archiving it next time 2024-11-10T13:02:42,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:02:42,643 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/WALs/3857ccc89b65,43055,1731243711302/3857ccc89b65%2C43055%2C1731243711302.1731243742448 to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs/3857ccc89b65%2C43055%2C1731243711302.1731243742448 2024-11-10T13:02:42,643 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
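The repeated WARN entries above come from the WAL close path polling HDFS to see whether the old writer's lease has been released, while the test's underlying DFSClient has already been shut down, hence the recurring "Filesystem closed" cause roughly once per second. The sketch below shows only the general recover-then-poll pattern against a DistributedFileSystem; it is not HBase's actual RecoverLeaseFSUtils (which, as the trace shows, reaches isFileClosed via reflection and layers more policy on top), and the class name, timeout parameter, and 1-second sleep are assumptions taken from the retry cadence visible above.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Minimal sketch of HDFS lease-recovery polling; illustrative only, not HBase's RecoverLeaseFSUtils. */
public final class LeaseRecoverySketch {

  public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery; returns true if the file is already closed.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L);               // roughly the 1 s interval between the WARN lines above
      recovered = dfs.isFileClosed(wal); // this is the call that fails once the DFS client is closed
    }
    return recovered;
  }
}
```

In the log above the loop can never succeed because the filesystem instance was closed during minicluster teardown, so every isFileClosed probe throws and is logged as "Failed invocation" until the caller gives up.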
2024-11-10T13:02:42,644 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:02:42,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:02:42,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:02:42,644 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-10T13:02:42,644 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-10T13:02:42,644 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1392897441, stopped=false
2024-11-10T13:02:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741842_1018 (size=2026)
2024-11-10T13:02:42,644 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,33021,1731243711250
2024-11-10T13:02:42,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741842_1018 (size=2026)
2024-11-10T13:02:42,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-10T13:02:42,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-10T13:02:42,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T13:02:42,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T13:02:42,646 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-10T13:02:42,646 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
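The ZKWatcher lines above record the deletion of the /hbase/running znode, which is how the master announces cluster shutdown to every process watching it. Below is a minimal ZooKeeper sketch of that notification pattern; the class and method names are illustrative and this is not HBase's ZKWatcher/ZKUtil code. One relevant property of the raw API: exists() registers the watch even when the znode is currently absent, which matches the later "Set watcher on znode that does not yet exist" entries.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Sketch: watch the cluster "running" znode and react when it is deleted. Illustrative only. */
public final class RunningZNodeWatcherSketch {

  public static void watchRunning(ZooKeeper zk, String baseZNode, Runnable onClusterDown)
      throws Exception {
    String runningZNode = baseZNode + "/running"; // e.g. /hbase/running, as in the events above

    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZNode.equals(event.getPath())) {
        onClusterDown.run(); // e.g. begin an orderly region server stop
      }
    };

    // Registers the watch whether or not the znode currently exists; a NodeDeleted
    // (or NodeCreated) event on this path will be delivered exactly once.
    zk.exists(runningZNode, watcher);
  }
}
```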
2024-11-10T13:02:42,646 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:02:42,646 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:02:42,646 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,43055,1731243711302' *****
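Both "Call stack" dumps above bottom out in AbstractTestLogRolling.tearDown, i.e. the shutdown is driven by the test's JUnit 4 @After hook calling HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that harness shape follows, assuming the start/shutdown pair on HBaseTestingUtil; the class name here is illustrative, and the real AbstractTestLogRolling performs considerably more setup and assertions than this.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

/** Illustrative JUnit 4 mini-cluster harness; not the actual AbstractTestLogRolling. */
public class MiniClusterHarnessSketch {

  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up a local HDFS, ZooKeeper, master and region server for the test.
    util.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // This is the call visible in the stack traces above: it closes the shared
    // connection and stops the mini HBase cluster (and its mini DFS cluster).
    util.shutdownMiniCluster();
  }
}
```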
2024-11-10T13:02:42,646 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-10T13:02:42,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T13:02:42,647 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T13:02:42,647 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-10T13:02:42,647 INFO [RS:0;3857ccc89b65:43055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-10T13:02:42,647 INFO [RS:0;3857ccc89b65:43055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-10T13:02:42,647 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(3091): Received CLOSE for 66fc512b5434e39aaeec8e55367a0047
2024-11-10T13:02:42,647 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,43055,1731243711302
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:43055.
2024-11-10T13:02:42,648 DEBUG [RS:0;3857ccc89b65:43055 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T13:02:42,648 DEBUG [RS:0;3857ccc89b65:43055 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 66fc512b5434e39aaeec8e55367a0047, disabling compactions & flushes
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-10T13:02:42,648 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. after waiting 0 ms
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-10T13:02:42,648 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 66fc512b5434e39aaeec8e55367a0047 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-10T13:02:42,648 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-10T13:02:42,648 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1325): Online Regions={66fc512b5434e39aaeec8e55367a0047=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047., 1588230740=hbase:meta,,1.1588230740}
2024-11-10T13:02:42,648 DEBUG [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 66fc512b5434e39aaeec8e55367a0047
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-10T13:02:42,648 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-10T13:02:42,648 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-10T13:02:42,648 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
2024-11-10T13:02:42,653 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/202dc0bfb84143489fdddae007e0a64b is 1080, key is row0001/info:/1731243762634/Put/seqid=0
2024-11-10T13:02:42,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741845_1021 (size=6033)
2024-11-10T13:02:42,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741845_1021 (size=6033)
2024-11-10T13:02:42,658 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/202dc0bfb84143489fdddae007e0a64b
2024-11-10T13:02:42,664 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/.tmp/info/202dc0bfb84143489fdddae007e0a64b as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/202dc0bfb84143489fdddae007e0a64b
2024-11-10T13:02:42,666 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/info/96483dee9aa946ceb268713a21631342 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047./info:regioninfo/1731243712601/Put/seqid=0
2024-11-10T13:02:42,670 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/202dc0bfb84143489fdddae007e0a64b, entries=1, sequenceid=22, filesize=5.9 K
2024-11-10T13:02:42,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741846_1022 (size=7308)
2024-11-10T13:02:42,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741846_1022 (size=7308)
2024-11-10T13:02:42,671 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 23ms, sequenceid=22, compaction requested=true
2024-11-10T13:02:42,671 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at
sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/info/96483dee9aa946ceb268713a21631342 2024-11-10T13:02:42,672 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0] to archive 2024-11-10T13:02:42,673 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T13:02:42,674 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246 to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/c033c45a2d604d39b200c365187d8246 2024-11-10T13:02:42,676 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769 to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/f6f6b24cfa47432d8d4222076dd0c769 2024-11-10T13:02:42,677 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0 to hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/info/dd698e9b304b4e999ae0c98f4d441fd0 2024-11-10T13:02:42,677 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3857ccc89b65:33021 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-10T13:02:42,677 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c033c45a2d604d39b200c365187d8246=6033, f6f6b24cfa47432d8d4222076dd0c769=6033, dd698e9b304b4e999ae0c98f4d441fd0=6033] 2024-11-10T13:02:42,681 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/66fc512b5434e39aaeec8e55367a0047/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-10T13:02:42,681 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 2024-11-10T13:02:42,681 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 66fc512b5434e39aaeec8e55367a0047: Waiting for close lock at 1731243762648Running coprocessor pre-close hooks at 1731243762648Disabling compacts and flushes for region at 1731243762648Disabling writes for close at 1731243762648Obtaining lock to block concurrent updates at 1731243762648Preparing flush snapshotting stores in 66fc512b5434e39aaeec8e55367a0047 at 1731243762648Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731243762648Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. at 1731243762649 (+1 ms)Flushing 66fc512b5434e39aaeec8e55367a0047/info: creating writer at 1731243762649Flushing 66fc512b5434e39aaeec8e55367a0047/info: appending metadata at 1731243762652 (+3 ms)Flushing 66fc512b5434e39aaeec8e55367a0047/info: closing flushed file at 1731243762652Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14475453: reopening flushed file at 1731243762664 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 66fc512b5434e39aaeec8e55367a0047 in 23ms, sequenceid=22, compaction requested=true at 1731243762671 (+7 ms)Writing region close event to WAL at 1731243762678 (+7 ms)Running coprocessor post-close hooks at 1731243762681 (+3 ms)Closed at 1731243762681 2024-11-10T13:02:42,681 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731243712236.66fc512b5434e39aaeec8e55367a0047. 
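The "Committing ... .tmp/info/... as ... /info/..." lines above show a freshly flushed store file being written under a temporary directory and then renamed into the store directory, after which the older compacted files are moved to the archive directory. A minimal sketch of that general write-to-temp-then-rename pattern using only the public Hadoop FileSystem API follows; the paths and class name are hypothetical and this is not HBase's internal flush code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical paths, loosely mirroring the ".tmp -> store dir" commit seen in the log.
    Path tmp = new Path("/tmp/demo-region/.tmp/info/flushfile");
    Path dst = new Path("/tmp/demo-region/info/flushfile");

    // Write the new file somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeUTF("flushed cells would go here");  // placeholder payload
    }

    // Make sure the destination directory exists, then rename the file into place;
    // the rename is what makes the file visible in its final location.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}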
2024-11-10T13:02:42,695 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/ns/df01048917d245aca36db5ebd8ffe193 is 43, key is default/ns:d/1731243712129/Put/seqid=0 2024-11-10T13:02:42,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741847_1023 (size=5153) 2024-11-10T13:02:42,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741847_1023 (size=5153) 2024-11-10T13:02:42,700 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/ns/df01048917d245aca36db5ebd8ffe193 2024-11-10T13:02:42,718 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/table/479401c7fd86493fbe7cf9d310ab072e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731243712611/Put/seqid=0 2024-11-10T13:02:42,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741848_1024 (size=5508) 2024-11-10T13:02:42,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741848_1024 (size=5508) 2024-11-10T13:02:42,723 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/table/479401c7fd86493fbe7cf9d310ab072e 2024-11-10T13:02:42,729 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/info/96483dee9aa946ceb268713a21631342 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/info/96483dee9aa946ceb268713a21631342 2024-11-10T13:02:42,734 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/info/96483dee9aa946ceb268713a21631342, entries=10, sequenceid=11, filesize=7.1 K 2024-11-10T13:02:42,734 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/ns/df01048917d245aca36db5ebd8ffe193 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/ns/df01048917d245aca36db5ebd8ffe193 2024-11-10T13:02:42,739 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/ns/df01048917d245aca36db5ebd8ffe193, entries=2, sequenceid=11, filesize=5.0 K 2024-11-10T13:02:42,740 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/.tmp/table/479401c7fd86493fbe7cf9d310ab072e as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/table/479401c7fd86493fbe7cf9d310ab072e 2024-11-10T13:02:42,744 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/table/479401c7fd86493fbe7cf9d310ab072e, entries=2, sequenceid=11, filesize=5.4 K 2024-11-10T13:02:42,745 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false 2024-11-10T13:02:42,750 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-10T13:02:42,751 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:02:42,751 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:02:42,751 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243762648Running coprocessor pre-close hooks at 1731243762648Disabling compacts and flushes for region at 1731243762648Disabling writes for close at 1731243762648Obtaining lock to block concurrent updates at 1731243762648Preparing flush snapshotting stores in 1588230740 at 1731243762648Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731243762649 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731243762649Flushing 1588230740/info: creating writer at 1731243762649Flushing 1588230740/info: appending metadata at 1731243762666 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731243762666Flushing 1588230740/ns: creating writer at 1731243762676 (+10 ms)Flushing 1588230740/ns: appending metadata at 1731243762694 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731243762695 (+1 ms)Flushing 1588230740/table: creating writer at 1731243762704 (+9 ms)Flushing 1588230740/table: appending metadata at 1731243762718 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731243762718Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26241a: reopening flushed file at 1731243762728 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e641448: reopening flushed file at 1731243762734 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@296ff29a: reopening flushed file at 1731243762739 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false at 1731243762746 (+7 ms)Writing region close event to WAL at 1731243762747 (+1 ms)Running coprocessor post-close hooks at 1731243762751 (+4 ms)Closed at 1731243762751 2024-11-10T13:02:42,751 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:02:42,848 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,43055,1731243711302; all regions closed. 2024-11-10T13:02:42,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741834_1010 (size=3306) 2024-11-10T13:02:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741834_1010 (size=3306) 2024-11-10T13:02:42,854 DEBUG [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs 2024-11-10T13:02:42,854 INFO [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C43055%2C1731243711302.meta:.meta(num 1731243712086) 2024-11-10T13:02:42,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,854 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,854 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741844_1020 (size=1252) 2024-11-10T13:02:42,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741844_1020 (size=1252) 2024-11-10T13:02:42,859 DEBUG [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/oldWALs 2024-11-10T13:02:42,859 INFO [RS:0;3857ccc89b65:43055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C43055%2C1731243711302:(num 1731243762636) 2024-11-10T13:02:42,859 DEBUG [RS:0;3857ccc89b65:43055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:02:42,859 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:02:42,859 INFO [RS:0;3857ccc89b65:43055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:02:42,859 INFO [RS:0;3857ccc89b65:43055 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:02:42,859 INFO [RS:0;3857ccc89b65:43055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:02:42,859 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:02:42,860 INFO [RS:0;3857ccc89b65:43055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43055 2024-11-10T13:02:42,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:02:42,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,43055,1731243711302 2024-11-10T13:02:42,862 INFO [RS:0;3857ccc89b65:43055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:02:42,863 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,43055,1731243711302] 2024-11-10T13:02:42,866 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,43055,1731243711302 already deleted, retry=false 2024-11-10T13:02:42,866 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,43055,1731243711302 expired; onlineServers=0 2024-11-10T13:02:42,866 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,33021,1731243711250' ***** 2024-11-10T13:02:42,866 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:02:42,866 INFO [M:0;3857ccc89b65:33021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:02:42,866 INFO [M:0;3857ccc89b65:33021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:02:42,866 DEBUG [M:0;3857ccc89b65:33021 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:02:42,866 DEBUG [M:0;3857ccc89b65:33021 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:02:42,866 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
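The ZooKeeper events above (a NodeDeleted event for /hbase/rs/3857ccc89b65,43055,1731243711302 followed by RegionServerTracker processing the expiration) rely on ephemeral znodes: the znode disappears when the owning session closes, and any watcher is notified. Below is a minimal standalone ZooKeeper sketch of that liveness mechanism; the quorum address and znode path are hypothetical, and this is not HBase's RegionServerTracker code.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {

  // Helper: open a session and block until it is connected.
  static ZooKeeper connect(String quorum) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    return zk;
  }

  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";      // hypothetical quorum
    String znode = "/demo-rs-server-1";    // hypothetical znode, analogous to /hbase/rs/<server>

    // Session 1 plays the "region server": it owns an ephemeral znode.
    ZooKeeper server = connect(quorum);
    server.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Session 2 plays the "tracker": it watches for NodeDeleted on that znode.
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper tracker = connect(quorum);
    tracker.exists(znode, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();               // expiration handling would start here
      }
    });

    server.close();                        // closing the session removes the ephemeral znode
    deleted.await();                       // the tracker observes the deletion
    tracker.close();
  }
}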
2024-11-10T13:02:42,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243711483 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243711483,5,FailOnTimeoutGroup] 2024-11-10T13:02:42,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243711483 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243711483,5,FailOnTimeoutGroup] 2024-11-10T13:02:42,866 INFO [M:0;3857ccc89b65:33021 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:02:42,866 INFO [M:0;3857ccc89b65:33021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:02:42,866 DEBUG [M:0;3857ccc89b65:33021 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:02:42,866 INFO [M:0;3857ccc89b65:33021 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:02:42,867 INFO [M:0;3857ccc89b65:33021 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:02:42,867 INFO [M:0;3857ccc89b65:33021 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:02:42,867 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:02:42,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:02:42,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:42,868 DEBUG [M:0;3857ccc89b65:33021 {}] zookeeper.ZKUtil(347): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:02:42,868 WARN [M:0;3857ccc89b65:33021 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:02:42,868 INFO [M:0;3857ccc89b65:33021 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/.lastflushedseqids 2024-11-10T13:02:42,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741849_1025 (size=130) 2024-11-10T13:02:42,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741849_1025 (size=130) 2024-11-10T13:02:42,873 INFO [M:0;3857ccc89b65:33021 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:02:42,873 INFO [M:0;3857ccc89b65:33021 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:02:42,873 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:02:42,873 INFO [M:0;3857ccc89b65:33021 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:42,873 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:42,873 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:02:42,873 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:42,874 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB 2024-11-10T13:02:42,889 DEBUG [M:0;3857ccc89b65:33021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb92652e5754491a4f3b7aec568a46d is 82, key is hbase:meta,,1/info:regioninfo/1731243712113/Put/seqid=0 2024-11-10T13:02:42,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741850_1026 (size=5672) 2024-11-10T13:02:42,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741850_1026 (size=5672) 2024-11-10T13:02:42,893 INFO [M:0;3857ccc89b65:33021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb92652e5754491a4f3b7aec568a46d 2024-11-10T13:02:42,911 DEBUG [M:0;3857ccc89b65:33021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0765e2f88c1f4d96ad254ea1f14905ce is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731243712616/Put/seqid=0 2024-11-10T13:02:42,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741851_1027 (size=7822) 2024-11-10T13:02:42,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741851_1027 (size=7822) 2024-11-10T13:02:42,916 INFO [M:0;3857ccc89b65:33021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0765e2f88c1f4d96ad254ea1f14905ce 2024-11-10T13:02:42,920 INFO [M:0;3857ccc89b65:33021 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0765e2f88c1f4d96ad254ea1f14905ce 2024-11-10T13:02:42,935 DEBUG [M:0;3857ccc89b65:33021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3714ec6c5c214d5f963d793f1b237ae5 is 69, key is 3857ccc89b65,43055,1731243711302/rs:state/1731243711538/Put/seqid=0 
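The flushes above are driven internally during shutdown, writing each memstore out as a store file. For a user table, the same kind of flush can also be requested explicitly through the public client API; a minimal sketch is below, assuming a reachable cluster configuration on the classpath and a hypothetical table name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table name; the table in the log is created by the test harness.
      admin.flush(TableName.valueOf("ExampleTable"));
    }
  }
}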
2024-11-10T13:02:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741852_1028 (size=5156) 2024-11-10T13:02:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741852_1028 (size=5156) 2024-11-10T13:02:42,940 INFO [M:0;3857ccc89b65:33021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3714ec6c5c214d5f963d793f1b237ae5 2024-11-10T13:02:42,959 DEBUG [M:0;3857ccc89b65:33021 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17da7b9070b64448a281f7d2077a6554 is 52, key is load_balancer_on/state:d/1731243712232/Put/seqid=0 2024-11-10T13:02:42,963 INFO [RS:0;3857ccc89b65:43055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:02:42,963 INFO [RS:0;3857ccc89b65:43055 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,43055,1731243711302; zookeeper connection closed. 2024-11-10T13:02:42,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:02:42,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x10101f885930001, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:02:42,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741853_1029 (size=5056) 2024-11-10T13:02:42,964 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7305cab4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7305cab4 2024-11-10T13:02:42,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741853_1029 (size=5056) 2024-11-10T13:02:42,964 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:02:42,964 INFO [M:0;3857ccc89b65:33021 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17da7b9070b64448a281f7d2077a6554 2024-11-10T13:02:42,969 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4cb92652e5754491a4f3b7aec568a46d as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cb92652e5754491a4f3b7aec568a46d 2024-11-10T13:02:42,973 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4cb92652e5754491a4f3b7aec568a46d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-10T13:02:42,974 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0765e2f88c1f4d96ad254ea1f14905ce as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0765e2f88c1f4d96ad254ea1f14905ce 2024-11-10T13:02:42,978 INFO [M:0;3857ccc89b65:33021 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0765e2f88c1f4d96ad254ea1f14905ce 2024-11-10T13:02:42,978 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0765e2f88c1f4d96ad254ea1f14905ce, entries=14, sequenceid=121, filesize=7.6 K 2024-11-10T13:02:42,979 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3714ec6c5c214d5f963d793f1b237ae5 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3714ec6c5c214d5f963d793f1b237ae5 2024-11-10T13:02:42,983 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3714ec6c5c214d5f963d793f1b237ae5, entries=1, sequenceid=121, filesize=5.0 K 2024-11-10T13:02:42,984 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17da7b9070b64448a281f7d2077a6554 as hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17da7b9070b64448a281f7d2077a6554 2024-11-10T13:02:42,988 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37879/user/jenkins/test-data/35294e02-2be1-c64f-a996-9e1116c3b0ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17da7b9070b64448a281f7d2077a6554, entries=1, sequenceid=121, filesize=4.9 K 2024-11-10T13:02:42,989 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false 2024-11-10T13:02:42,990 INFO [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:02:42,990 DEBUG [M:0;3857ccc89b65:33021 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243762873Disabling compacts and flushes for region at 1731243762873Disabling writes for close at 1731243762873Obtaining lock to block concurrent updates at 1731243762874 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243762874Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1731243762874Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731243762875 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243762875Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243762888 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243762889 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243762897 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243762911 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243762911Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243762920 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243762934 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243762934Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243762945 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243762958 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243762959 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74c12874: reopening flushed file at 1731243762968 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@430633e1: reopening flushed file at 1731243762973 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@436de37b: reopening flushed file at 1731243762978 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c3cc7: reopening flushed file at 1731243762983 (+5 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false at 1731243762989 (+6 ms)Writing region close event to WAL at 1731243762990 (+1 ms)Closed at 1731243762990 2024-11-10T13:02:42,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:02:42,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46011 is added to blk_1073741830_1006 (size=53026) 2024-11-10T13:02:42,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37885 is added to blk_1073741830_1006 (size=53026) 2024-11-10T13:02:42,994 INFO [M:0;3857ccc89b65:33021 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
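The "Region close journal" entries above pack each close step together with an epoch timestamp and a "(+N ms)" delta into one long string. A small, self-contained sketch of how such a journal string could be split back into per-step timings; the sample journal text here is shortened and hypothetical.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParserSketch {
  // Matches "<step description> at <13-digit epoch millis>" optionally followed by " (+N ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal = "Waiting for close lock at 1731243762873"
        + "Disabling writes for close at 1731243762873"
        + "Flushing stores at 1731243762875 (+2 ms)"
        + "Writing region close event to WAL at 1731243762990 (+115 ms)"
        + "Closed at 1731243762990";

    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String step = m.group(1);
      long atMillis = Long.parseLong(m.group(2));
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-45s at=%d delta=+%sms%n", step, atMillis, delta);
    }
  }
}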
2024-11-10T13:02:42,994 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:02:42,994 INFO [M:0;3857ccc89b65:33021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33021 2024-11-10T13:02:42,994 INFO [M:0;3857ccc89b65:33021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:02:43,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:43,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:43,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:02:43,096 INFO [M:0;3857ccc89b65:33021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:02:43,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33021-0x10101f885930000, quorum=127.0.0.1:57635, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:02:43,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bfe295d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:02:43,099 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22a4ff4e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:02:43,099 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:02:43,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca63b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:02:43,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fa194e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,STOPPED} 2024-11-10T13:02:43,100 WARN [BP-1300434893-172.17.0.2-1731243710403 heartbeating to 
localhost/127.0.0.1:37879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:02:43,100 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-10T13:02:43,100 WARN [BP-1300434893-172.17.0.2-1731243710403 heartbeating to localhost/127.0.0.1:37879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1300434893-172.17.0.2-1731243710403 (Datanode Uuid abdb9076-8bca-4f9d-a844-e54e13f1b783) service to localhost/127.0.0.1:37879 2024-11-10T13:02:43,100 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:02:43,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data3/current/BP-1300434893-172.17.0.2-1731243710403 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:02:43,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data4/current/BP-1300434893-172.17.0.2-1731243710403 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:02:43,101 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:02:43,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20e4ef1d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:02:43,104 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fc56883{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:02:43,104 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:02:43,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20ad0bae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:02:43,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fa18b90{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,STOPPED} 2024-11-10T13:02:43,105 WARN [BP-1300434893-172.17.0.2-1731243710403 heartbeating to localhost/127.0.0.1:37879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:02:43,105 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
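The RecoverLeaseFSUtils warnings earlier in the shutdown ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from the WAL close path asking HDFS whether the file is already closed while the test's filesystem instance is itself being torn down. Outside of that shutdown race, the usual pattern is to ask the NameNode to recover the lease and poll isFileClosed until it reports true. A minimal sketch using the public DistributedFileSystem API, with a hypothetical namenode address and WAL path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:9000");   // hypothetical namenode
    Path wal = new Path("/demo/WALs/server-1/wal-file"); // hypothetical WAL path

    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("lease recovery only applies to HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Ask the NameNode to recover the lease, then poll until the file is reported closed.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000);
      recovered = dfs.recoverLease(wal);
    }
  }
}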
2024-11-10T13:02:43,105 WARN [BP-1300434893-172.17.0.2-1731243710403 heartbeating to localhost/127.0.0.1:37879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1300434893-172.17.0.2-1731243710403 (Datanode Uuid bc60de89-34db-4f29-b074-a5cd4c9ae460) service to localhost/127.0.0.1:37879 2024-11-10T13:02:43,105 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:02:43,106 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data1/current/BP-1300434893-172.17.0.2-1731243710403 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:02:43,106 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/cluster_edfbdfca-8937-9eaf-525c-a945f7224ae0/data/data2/current/BP-1300434893-172.17.0.2-1731243710403 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:02:43,107 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:02:43,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29b40997{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:02:43,113 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37ea919c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:02:43,113 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:02:43,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48b18cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:02:43,113 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232381c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir/,STOPPED} 2024-11-10T13:02:43,119 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:02:43,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:02:43,145 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37879 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37879 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37879 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37879 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37879 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3857ccc89b65:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37879 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37879 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37879 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=108 (was 139), ProcessCount=11 (was 11), AvailableMemoryMB=7834 (was 8043) 2024-11-10T13:02:43,152 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=108, ProcessCount=11, AvailableMemoryMB=7834 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.log.dir so I do NOT create it in target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b037f8cb-13dc-91a9-da9b-b5da201d4ac9/hadoop.tmp.dir so I do NOT create it in target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04, deleteOnExit=true 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/test.cache.data in system properties and HBase conf 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-10T13:02:43,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:02:43,153 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/nfs.dump.dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:02:43,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:02:43,167 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:02:43,229 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:02:43,232 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:02:43,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:02:43,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:02:43,234 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:02:43,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:02:43,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@188ddc10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:02:43,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25570184{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:02:43,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63c68f65{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/java.io.tmpdir/jetty-localhost-37193-hadoop-hdfs-3_4_1-tests_jar-_-any-1663600403233584145/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:02:43,350 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@349a863b{HTTP/1.1, (http/1.1)}{localhost:37193} 2024-11-10T13:02:43,350 INFO [Time-limited test {}] server.Server(415): Started @236321ms 2024-11-10T13:02:43,363 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:02:43,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:02:43,435 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:02:43,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:02:43,436 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:02:43,436 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:02:43,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@595aaa8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:02:43,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9a438{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:02:43,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@277e18bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/java.io.tmpdir/jetty-localhost-34433-hadoop-hdfs-3_4_1-tests_jar-_-any-11021514602009828342/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:02:43,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ce0132a{HTTP/1.1, (http/1.1)}{localhost:34433} 2024-11-10T13:02:43,550 INFO [Time-limited test {}] server.Server(415): Started @236521ms 2024-11-10T13:02:43,551 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:02:43,554 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:02:43,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:02:43,584 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:02:43,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:02:43,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:02:43,585 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:02:43,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3612be31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:02:43,585 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ff7780b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:02:43,699 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data1/current/BP-422773242-172.17.0.2-1731243763174/current, will proceed with Du for space computation calculation, 2024-11-10T13:02:43,700 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data2/current/BP-422773242-172.17.0.2-1731243763174/current, will proceed with Du for space computation calculation, 2024-11-10T13:02:43,723 WARN [Thread-1934 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:02:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1af64a2469e76427 with lease ID 0x59d5e4510dffa685: Processing first storage report for DS-7a8b2dea-0bd5-4646-9b22-111e484d77e4 from datanode DatanodeRegistration(127.0.0.1:44757, datanodeUuid=07ecba63-805c-4a7d-bbc4-0d5c4212a0b7, infoPort=33133, infoSecurePort=0, ipcPort=46113, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174) 2024-11-10T13:02:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1af64a2469e76427 with lease ID 0x59d5e4510dffa685: from storage DS-7a8b2dea-0bd5-4646-9b22-111e484d77e4 node DatanodeRegistration(127.0.0.1:44757, datanodeUuid=07ecba63-805c-4a7d-bbc4-0d5c4212a0b7, infoPort=33133, infoSecurePort=0, ipcPort=46113, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:02:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1af64a2469e76427 with lease ID 0x59d5e4510dffa685: Processing first storage report for DS-4b73f4bf-18bd-4ef3-a48d-6ceadfe3b295 from datanode DatanodeRegistration(127.0.0.1:44757, datanodeUuid=07ecba63-805c-4a7d-bbc4-0d5c4212a0b7, infoPort=33133, infoSecurePort=0, ipcPort=46113, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174) 2024-11-10T13:02:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1af64a2469e76427 with lease ID 0x59d5e4510dffa685: from storage DS-4b73f4bf-18bd-4ef3-a48d-6ceadfe3b295 node DatanodeRegistration(127.0.0.1:44757, datanodeUuid=07ecba63-805c-4a7d-bbc4-0d5c4212a0b7, infoPort=33133, infoSecurePort=0, ipcPort=46113, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:02:43,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42d8f83b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/java.io.tmpdir/jetty-localhost-40819-hadoop-hdfs-3_4_1-tests_jar-_-any-14343175223064476911/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:02:43,733 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d31ee43{HTTP/1.1, (http/1.1)}{localhost:40819} 2024-11-10T13:02:43,733 INFO [Time-limited test {}] server.Server(415): Started @236703ms 2024-11-10T13:02:43,734 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
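The 13:02:43,152-153 entries above record the harness finishing one minicluster and immediately starting another for testLogRolling with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}; everything that follows (MiniDFS datanodes, MiniZK, master and regionserver startup) is driven by that single call. A minimal sketch of how a test typically issues it, assuming the branch-3 test utility API (HBaseTestingUtil, StartMiniClusterOption.builder()); the option values mirror the ones logged, the class name MiniClusterSketch is hypothetical, and this is an illustration, not the TestLogRolling source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Option values mirror the StartMiniClusterOption printed in the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(option);   // brings up MiniDFS, MiniZooKeeperCluster and the HBase daemons
        try {
          // ... exercise WAL rolling / flushes / compactions here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" style teardown seen earlier
        }
      }
    }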
2024-11-10T13:02:43,826 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data3/current/BP-422773242-172.17.0.2-1731243763174/current, will proceed with Du for space computation calculation, 2024-11-10T13:02:43,826 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data4/current/BP-422773242-172.17.0.2-1731243763174/current, will proceed with Du for space computation calculation, 2024-11-10T13:02:43,842 WARN [Thread-1970 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:02:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c607a30736674f5 with lease ID 0x59d5e4510dffa686: Processing first storage report for DS-413d55ba-e72d-4641-bdb7-6b07f254917d from datanode DatanodeRegistration(127.0.0.1:46573, datanodeUuid=febf492f-ce3e-4b83-baff-1f10f21665d8, infoPort=37721, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174) 2024-11-10T13:02:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c607a30736674f5 with lease ID 0x59d5e4510dffa686: from storage DS-413d55ba-e72d-4641-bdb7-6b07f254917d node DatanodeRegistration(127.0.0.1:46573, datanodeUuid=febf492f-ce3e-4b83-baff-1f10f21665d8, infoPort=37721, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:02:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c607a30736674f5 with lease ID 0x59d5e4510dffa686: Processing first storage report for DS-7f316b38-2898-48e8-ba58-9547bad95d88 from datanode DatanodeRegistration(127.0.0.1:46573, datanodeUuid=febf492f-ce3e-4b83-baff-1f10f21665d8, infoPort=37721, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174) 2024-11-10T13:02:43,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c607a30736674f5 with lease ID 0x59d5e4510dffa686: from storage DS-7f316b38-2898-48e8-ba58-9547bad95d88 node DatanodeRegistration(127.0.0.1:46573, datanodeUuid=febf492f-ce3e-4b83-baff-1f10f21665d8, infoPort=37721, infoSecurePort=0, ipcPort=41243, storageInfo=lv=-57;cid=testClusterID;nsid=1330387679;c=1731243763174), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:02:43,854 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5 2024-11-10T13:02:43,856 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/zookeeper_0, clientPort=54757, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:02:43,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54757 2024-11-10T13:02:43,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:02:43,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:02:43,867 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9 with version=8 2024-11-10T13:02:43,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:02:43,869 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:02:43,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,869 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:02:43,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,869 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:02:43,870 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:02:43,870 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:02:43,870 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44609 2024-11-10T13:02:43,871 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44609 connecting to ZooKeeper ensemble=127.0.0.1:54757 2024-11-10T13:02:43,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446090x0, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:02:43,877 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44609-0x10101f9531f0000 connected 2024-11-10T13:02:43,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,893 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:02:43,893 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9, hbase.cluster.distributed=false 2024-11-10T13:02:43,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:02:43,895 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44609 2024-11-10T13:02:43,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44609 2024-11-10T13:02:43,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44609 2024-11-10T13:02:43,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44609 2024-11-10T13:02:43,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44609 2024-11-10T13:02:43,912 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:02:43,912 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:02:43,913 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42123 2024-11-10T13:02:43,914 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42123 connecting to ZooKeeper ensemble=127.0.0.1:54757 2024-11-10T13:02:43,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421230x0, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:02:43,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:421230x0, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:02:43,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42123-0x10101f9531f0001 connected 2024-11-10T13:02:43,920 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:02:43,920 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:02:43,921 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:02:43,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:02:43,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42123 2024-11-10T13:02:43,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42123 2024-11-10T13:02:43,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42123 2024-11-10T13:02:43,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42123 2024-11-10T13:02:43,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42123 2024-11-10T13:02:43,934 
DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:44609 2024-11-10T13:02:43,934 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,44609,1731243763869 2024-11-10T13:02:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:02:43,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:02:43,936 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,44609,1731243763869 2024-11-10T13:02:43,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:43,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:02:43,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:43,939 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:02:43,939 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,44609,1731243763869 from backup master directory 2024-11-10T13:02:43,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,44609,1731243763869 2024-11-10T13:02:43,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:02:43,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:02:43,940 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
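The ZKWatcher/ZKUtil entries above (13:02:43,877 onward) show the master and regionserver connecting to the ensemble at 127.0.0.1:54757 and setting watchers on znodes such as /hbase/master and /hbase/running before those znodes exist, so that later NodeCreated events drive the state changes. A minimal sketch of that watch-before-create pattern with the plain ZooKeeper client; the ensemble address and znode path come from the log, while the class name, session timeout and latch are illustrative, and this is not HBase's ZKWatcher/ZKUtil code.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          // Fires for the next event on the watched path, e.g. NodeCreated for /hbase/master.
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            created.countDown();
          }
        };
        // Ensemble address taken from the log; the session timeout here is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54757", 30_000, watcher);
        // exists() with a watch works even when the znode is not there yet:
        // this is the "Set watcher on znode that does not yet exist" step from the log.
        if (zk.exists("/hbase/master", true) == null) {
          created.await();   // woken up when some process creates /hbase/master
        }
        zk.close();
      }
    }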
2024-11-10T13:02:43,940 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,44609,1731243763869 2024-11-10T13:02:43,944 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/hbase.id] with ID: 800ee844-adf9-41cb-91dd-388337b0f5b7 2024-11-10T13:02:43,944 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/.tmp/hbase.id 2024-11-10T13:02:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:02:43,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:02:43,950 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/.tmp/hbase.id]:[hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/hbase.id] 2024-11-10T13:02:43,959 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:43,959 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:02:43,960 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
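The 13:02:43,944-950 entries record the new cluster ID being written first to .tmp/hbase.id and only then moved to hbase.id, i.e. a write-to-temporary-then-rename step so readers never observe a partially written ID file. A sketch of that pattern against the generic Hadoop FileSystem API, under the assumption that a plain create-then-rename is the essence of what is logged; the paths, class name and use of a random UUID are illustrative, and this is not the FSUtils code that produced these entries.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdWriteSketch {
      public static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws Exception {
        Path target = new Path(rootDir, "hbase.id");
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        // Write the ID to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // ... then move it into place so readers only ever see a complete file.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);   // local FS unless fs.defaultFS points at HDFS
        writeClusterId(fs, new Path("/tmp/hbase-root"), java.util.UUID.randomUUID().toString());
      }
    }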
2024-11-10T13:02:43,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:43,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:43,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:02:43,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:02:43,970 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:02:43,970 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:02:43,971 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:02:43,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:02:43,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:02:43,977 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store 2024-11-10T13:02:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:02:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:02:43,983 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:02:43,983 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
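The close journal just above ("Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled for region") reflects a close protocol that first stops accepting new writes and then waits, with a timeout, for in-flight writes to drain before the region is marked closed. A rough sketch of that locking discipline with a plain ReadWriteLock; the class and method names here are made up and this is not HRegion's real internals.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean closed;

        // Writers share the read side of the lock, so normal puts do not block each other.
        void put(String row) {
            closeLock.readLock().lock();
            try {
                if (closed) throw new IllegalStateException("region is closed");
                // apply the mutation here
            } finally {
                closeLock.readLock().unlock();
            }
        }

        // Close takes the write side with a time limit, mirroring the
        // "Time limited wait for close lock" step in the journal above.
        boolean close(long timeoutMs) throws InterruptedException {
            if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
                return false; // writers did not quiesce in time
            }
            try {
                closed = true; // "Updates disabled for region"
                // flush stores and write the region close event to the WAL here
                return true;
            } finally {
                closeLock.writeLock().unlock();
            }
        }

        public static void main(String[] args) throws InterruptedException {
            CloseLockSketch region = new CloseLockSketch();
            region.put("row1");
            System.out.println("closed=" + region.close(1000));
        }
    }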
2024-11-10T13:02:43,983 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243763983Disabling compacts and flushes for region at 1731243763983Disabling writes for close at 1731243763983Writing region close event to WAL at 1731243763983Closed at 1731243763983 2024-11-10T13:02:43,984 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/.initializing 2024-11-10T13:02:43,984 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/WALs/3857ccc89b65,44609,1731243763869 2024-11-10T13:02:43,986 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C44609%2C1731243763869, suffix=, logDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/WALs/3857ccc89b65,44609,1731243763869, archiveDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/oldWALs, maxLogs=10 2024-11-10T13:02:43,987 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C44609%2C1731243763869.1731243763986 2024-11-10T13:02:43,990 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/WALs/3857ccc89b65,44609,1731243763869/3857ccc89b65%2C44609%2C1731243763869.1731243763986 2024-11-10T13:02:43,991 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37721:37721),(127.0.0.1/127.0.0.1:33133:33133)] 2024-11-10T13:02:43,991 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:02:43,992 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:43,992 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:43,992 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:43,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:43,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:02:43,998 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:43,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:43,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:43,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:02:43,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:02:44,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:02:44,001 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:02:44,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:02:44,003 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:02:44,003 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,004 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:44,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:44,004 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,005 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,005 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,006 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:02:44,007 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:02:44,008 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:02:44,009 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735230, jitterRate=-0.06510703265666962}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:02:44,009 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243763992Initializing all the Stores at 1731243763992Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243763992Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243763996 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243763996Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243763996Cleaning up temporary data from old regions at 1731243764005 (+9 ms)Region opened successfully at 1731243764009 (+4 ms) 2024-11-10T13:02:44,009 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:02:44,012 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60aa85f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:02:44,013 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:02:44,013 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:02:44,013 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:02:44,013 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:02:44,013 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:02:44,014 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:02:44,014 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:02:44,015 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
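The FlushLargeStoresPolicy numbers above are simple arithmetic: the master:store region flushes at 134217728 bytes (the flushSize=134217728 reported by MasterRegionFlusherAndCompactor) and has four column families (info, proc, rs, state), so with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound falls back to 134217728 / 4 = 33554432 bytes, the "32.0 M" and "flushSizeLowerBound=33554432" seen in the log. A small sketch of that fallback in plain Java, not HBase's FlushLargeStoresPolicy code:

    public class FlushLowerBoundSketch {
        // When the per-column-family lower bound is not configured, fall back to the
        // region's memstore flush size divided by its number of column families.
        static long perFamilyLowerBound(long memstoreFlushSize, int numFamilies) {
            return memstoreFlushSize / numFamilies;
        }

        public static void main(String[] args) {
            long flushSize = 134_217_728L;            // 128 MB, from the log above
            int families = 4;                         // info, proc, rs, state
            System.out.println(perFamilyLowerBound(flushSize, families)); // 33554432
        }
    }

If the same rule is applied to the hbase:meta numbers later in the log (16.0 M lower bound, four families: info, ns, rep_barrier, table), it implies a 64 MB memstore flush size for that region.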
2024-11-10T13:02:44,016 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:02:44,017 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:02:44,017 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:02:44,018 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:02:44,022 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:02:44,022 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:02:44,022 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:02:44,023 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:02:44,024 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:02:44,025 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:02:44,026 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:02:44,027 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:02:44,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:02:44,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:02:44,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-10T13:02:44,030 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,44609,1731243763869, sessionid=0x10101f9531f0000, setting cluster-up flag (Was=false) 2024-11-10T13:02:44,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,036 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:02:44,037 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,44609,1731243763869 2024-11-10T13:02:44,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,045 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:02:44,045 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,44609,1731243763869 2024-11-10T13:02:44,046 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:02:44,048 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:02:44,048 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:02:44,048 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
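The StochasticLoadBalancer entry above lists the cost functions it loaded; conceptually the balancer scores a candidate cluster layout as a multiplier-weighted sum of those functions' normalized costs and then searches for moves that lower the score, so with every multiplier at zero (as the reported "sum of multiplier of cost functions = 0.0" suggests here) such a score cannot distinguish candidate plans. A hedged sketch of the weighted-sum idea with invented numbers, not the real HBase cost-function classes:

    public class WeightedCostSketch {
        // Score for a candidate layout: sum(multiplier_i * cost_i), where each cost_i
        // is a normalized value in [0, 1]. Lower scores are better.
        static double totalCost(double[] multipliers, double[] costs) {
            double sum = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                sum += multipliers[i] * costs[i];
            }
            return sum;
        }

        public static void main(String[] args) {
            double[] costs = {0.40, 0.10, 0.05};             // hypothetical normalized costs
            double[] weights = {500.0, 7.0, 25.0};           // hypothetical multipliers
            System.out.println(totalCost(weights, costs));   // weighted score to minimize
            System.out.println(totalCost(new double[]{0, 0, 0}, costs)); // 0.0: flat score
        }
    }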
2024-11-10T13:02:44,048 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,44609,1731243763869 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:02:44,049 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:02:44,049 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:02:44,049 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:02:44,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:02:44,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:02:44,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:02:44,050 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243794052 2024-11-10T13:02:44,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:02:44,052 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:02:44,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:02:44,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:02:44,052 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:02:44,052 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:02:44,053 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:02:44,053 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:02:44,053 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:02:44,054 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243764053,5,FailOnTimeoutGroup] 2024-11-10T13:02:44,054 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243764054,5,FailOnTimeoutGroup] 2024-11-10T13:02:44,054 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,054 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:02:44,054 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,054 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:02:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:02:44,062 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:02:44,062 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9 2024-11-10T13:02:44,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:02:44,070 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:02:44,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:44,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:02:44,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:02:44,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:02:44,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:02:44,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:02:44,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:02:44,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:02:44,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:02:44,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:02:44,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740 2024-11-10T13:02:44,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740 2024-11-10T13:02:44,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:02:44,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:02:44,079 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:02:44,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:02:44,081 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:02:44,082 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777863, jitterRate=-0.010896831750869751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:02:44,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243764070Initializing all the Stores at 1731243764071 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764071Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764071Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243764071Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764071Cleaning up temporary data from old regions at 1731243764079 (+8 ms)Region opened successfully at 1731243764082 (+3 ms) 2024-11-10T13:02:44,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:02:44,082 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:02:44,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:02:44,082 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:02:44,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:02:44,083 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:02:44,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243764082Disabling compacts and flushes for region at 1731243764082Disabling writes for close at 1731243764082Writing region close event to WAL at 1731243764083 (+1 ms)Closed at 1731243764083 2024-11-10T13:02:44,084 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:02:44,084 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:02:44,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:02:44,085 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:02:44,086 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:02:44,125 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(746): ClusterId : 800ee844-adf9-41cb-91dd-388337b0f5b7 2024-11-10T13:02:44,125 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:02:44,127 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:02:44,127 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:02:44,130 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:02:44,130 DEBUG [RS:0;3857ccc89b65:42123 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10202e01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:02:44,142 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:42123 2024-11-10T13:02:44,142 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:02:44,142 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:02:44,142 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(832): About to register with Master. 
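The PEWorker entries above show the procedure-v2 style of execution: InitMetaProcedure is a state machine driven one state at a time (INIT_META_WRITE_FS_LAYOUT, then INIT_META_ASSIGN_META), and the second state hands the actual assignment off to a child TransitRegionStateProcedure. A toy sketch of that execute-and-advance loop; the enum values and method names are invented for illustration and this is not the real ProcedureExecutor API.

    public class StateMachineSketch {
        enum State { WRITE_FS_LAYOUT, ASSIGN_META, DONE }

        // Execute the current state and return the next one, the way a PEWorker
        // drives a procedure one persisted step at a time.
        static State executeStep(State current) {
            switch (current) {
                case WRITE_FS_LAYOUT:
                    System.out.println("creating hbase:meta region layout on the filesystem");
                    return State.ASSIGN_META;
                case ASSIGN_META:
                    System.out.println("scheduling child procedure to assign hbase:meta");
                    return State.DONE;
                default:
                    return State.DONE;
            }
        }

        public static void main(String[] args) {
            State s = State.WRITE_FS_LAYOUT;
            while (s != State.DONE) {
                s = executeStep(s);
            }
        }
    }

Persisting the procedure state after each step (which the real store does through the master:store region opened earlier in this log) is what lets a restarted master resume from the last completed state instead of starting over.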
2024-11-10T13:02:44,143 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,44609,1731243763869 with port=42123, startcode=1731243763912 2024-11-10T13:02:44,143 DEBUG [RS:0;3857ccc89b65:42123 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:02:44,145 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:02:44,145 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44609 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,145 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44609 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,147 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9 2024-11-10T13:02:44,147 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36499 2024-11-10T13:02:44,147 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:02:44,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:02:44,148 DEBUG [RS:0;3857ccc89b65:42123 {}] zookeeper.ZKUtil(111): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,149 WARN [RS:0;3857ccc89b65:42123 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:02:44,149 INFO [RS:0;3857ccc89b65:42123 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:02:44,149 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,149 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,42123,1731243763912] 2024-11-10T13:02:44,152 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:02:44,153 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:02:44,153 INFO [RS:0;3857ccc89b65:42123 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:02:44,154 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
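The MemStoreFlusher line above gives both the global memstore limit and its low-water mark, and the two are related by a simple fraction: 836 M is 95% of 880 M. Roughly speaking, the flusher starts forcing flushes once global usage crosses the low mark and blocks writes at the full limit, working usage back down below the low mark. A small sketch of that relationship; the 0.95 factor is read off the ratio in this log line, not taken from the configuration itself.

    public class MemStoreMarksSketch {
        // Low-water mark expressed as a fraction of the global memstore limit.
        static long lowWaterMark(long globalLimitBytes, double lowMarkFraction) {
            return Math.round(globalLimitBytes * lowMarkFraction);
        }

        public static void main(String[] args) {
            long limit = 880L * 1024 * 1024;                      // 880 M, from the log above
            long lowMark = lowWaterMark(limit, 0.95);             // ratio observed in the log
            System.out.println((lowMark / (1024 * 1024)) + " M"); // prints 836 M
        }
    }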
2024-11-10T13:02:44,154 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:02:44,154 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:02:44,154 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:02:44,155 DEBUG [RS:0;3857ccc89b65:42123 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,159 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,42123,1731243763912-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:02:44,175 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:02:44,175 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,42123,1731243763912-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,175 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,175 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.Replication(171): 3857ccc89b65,42123,1731243763912 started 2024-11-10T13:02:44,188 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,188 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,42123,1731243763912, RpcServer on 3857ccc89b65/172.17.0.2:42123, sessionid=0x10101f9531f0001 2024-11-10T13:02:44,188 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:02:44,188 DEBUG [RS:0;3857ccc89b65:42123 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,188 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,42123,1731243763912' 2024-11-10T13:02:44,188 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,42123,1731243763912' 2024-11-10T13:02:44,189 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:02:44,189 DEBUG 
[RS:0;3857ccc89b65:42123 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:02:44,190 DEBUG [RS:0;3857ccc89b65:42123 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:02:44,190 INFO [RS:0;3857ccc89b65:42123 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:02:44,190 INFO [RS:0;3857ccc89b65:42123 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:02:44,236 WARN [3857ccc89b65:44609 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-10T13:02:44,292 INFO [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C42123%2C1731243763912, suffix=, logDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912, archiveDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs, maxLogs=32 2024-11-10T13:02:44,292 INFO [RS:0;3857ccc89b65:42123 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C42123%2C1731243763912.1731243764292 2024-11-10T13:02:44,298 INFO [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243764292 2024-11-10T13:02:44,298 DEBUG [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33133:33133),(127.0.0.1/127.0.0.1:37721:37721)] 2024-11-10T13:02:44,486 DEBUG [3857ccc89b65:44609 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:02:44,487 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,488 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,42123,1731243763912, state=OPENING 2024-11-10T13:02:44,490 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:02:44,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:02:44,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:02:44,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:02:44,492 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:02:44,492 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,42123,1731243763912}] 2024-11-10T13:02:44,645 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:02:44,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38655, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:02:44,651 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:02:44,651 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:02:44,653 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C42123%2C1731243763912.meta, suffix=.meta, logDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912, archiveDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs, maxLogs=32 2024-11-10T13:02:44,654 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C42123%2C1731243763912.meta.1731243764653.meta 2024-11-10T13:02:44,659 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.meta.1731243764653.meta 2024-11-10T13:02:44,660 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37721:37721),(127.0.0.1/127.0.0.1:33133:33133)] 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:02:44,661 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
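[editor's aside, not part of the test output] The meta open above goes through wal.WALFactory, which instantiates an FSHLogProvider and rolls a fresh .meta WAL file under the server's WALs directory. For orientation only: which provider gets instantiated is normally a configuration choice. A sketch assuming the standard hbase.wal.provider / hbase.wal.meta_provider keys (not taken from this test's setup) is:

    // Sketch: "filesystem" selects the FSHLog-based provider named in the log lines above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "filesystem");      // FSHLogProvider for region WALs
            conf.set("hbase.wal.meta_provider", "filesystem"); // same provider for the meta WAL
            System.out.println("wal provider = " + conf.get("hbase.wal.provider"));
        }
    }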
2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:02:44,661 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:02:44,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:02:44,663 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:02:44,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:02:44,665 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:02:44,665 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:02:44,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:02:44,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:02:44,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:02:44,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:02:44,667 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
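[editor's aside, not part of the test output] At this point all four column families of hbase:meta (info, ns, rep_barrier, table) have their stores instantiated. The same catalog data is reachable from an ordinary client; a small sketch, assuming a Connection such as the one outlined earlier, is:

    // Sketch: scan the 'info' family of hbase:meta and print the region rows it holds.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanMeta {
        public static void scanMeta(Connection connection) throws Exception {
            try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
                 ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
                for (Result row : scanner) {
                    // Row keys are region names, e.g.
                    // TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.
                    System.out.println(Bytes.toStringBinary(row.getRow()));
                }
            }
        }
    }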
2024-11-10T13:02:44,667 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:02:44,668 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740 2024-11-10T13:02:44,669 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740 2024-11-10T13:02:44,670 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:02:44,670 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:02:44,670 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:02:44,671 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:02:44,672 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833118, jitterRate=0.05936436355113983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:02:44,672 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:02:44,673 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243764661Writing region info on filesystem at 1731243764661Initializing all the Stores at 1731243764662 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764662Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764662Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243764662Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243764662Cleaning up temporary data from old regions at 1731243764670 (+8 ms)Running coprocessor post-open hooks at 1731243764672 (+2 ms)Region opened successfully at 1731243764673 (+1 ms) 2024-11-10T13:02:44,674 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243764644 2024-11-10T13:02:44,676 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:02:44,676 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:02:44,677 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,678 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,42123,1731243763912, state=OPEN 2024-11-10T13:02:44,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:02:44,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:02:44,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:02:44,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:02:44,683 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:02:44,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,42123,1731243763912 in 191 msec 2024-11-10T13:02:44,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:02:44,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 602 msec 2024-11-10T13:02:44,689 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:02:44,689 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:02:44,690 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:02:44,690 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,42123,1731243763912, seqNum=-1] 2024-11-10T13:02:44,691 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:02:44,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40119, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:02:44,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 648 msec 2024-11-10T13:02:44,696 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243764696, completionTime=-1 2024-11-10T13:02:44,696 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:02:44,696 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243824698 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243884698 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,698 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,699 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:44609, period=300000, unit=MILLISECONDS is enabled. 
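[editor's aside, not part of the test output] InitMetaProcedure finishes by creating the 'default' and 'hbase' namespaces noted above. Once the master is up they are visible through the Admin API; a brief sketch (assuming listNamespaceDescriptors is available in this client version) is:

    // Sketch: list the namespaces created during master startup.
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;

    public class ListNamespaces {
        public static void list(Admin admin) throws Exception {
            for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                System.out.println(ns.getName()); // expect "default" and "hbase"
            }
        }
    }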
2024-11-10T13:02:44,699 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,699 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:02:44,700 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.762sec 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:02:44,702 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:02:44,704 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:02:44,704 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:02:44,704 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,44609,1731243763869-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
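[editor's aside, not part of the test output] With the master reporting that initialization is complete, the surrounding test harness (HBaseTestingUtil, whose "Minicluster is up" message follows shortly) has a working single-master, single-regionserver cluster. A hedged sketch of that harness pattern, with method names assumed to mirror the long-standing HBaseTestingUtility API rather than copied from this test, is:

    // Sketch: start a mini cluster, touch the Admin API, and shut everything down.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            util.startMiniCluster();                 // HDFS, ZooKeeper, master, one regionserver
            try (Admin admin = util.getAdmin()) {
                System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }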
2024-11-10T13:02:44,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@470e396e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:02:44,725 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,44609,-1 for getting cluster id 2024-11-10T13:02:44,725 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:02:44,727 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '800ee844-adf9-41cb-91dd-388337b0f5b7' 2024-11-10T13:02:44,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:02:44,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "800ee844-adf9-41cb-91dd-388337b0f5b7" 2024-11-10T13:02:44,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12396787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:02:44,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,44609,-1] 2024-11-10T13:02:44,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:02:44,728 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:02:44,729 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:02:44,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@499df229, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:02:44,730 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:02:44,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,42123,1731243763912, seqNum=-1] 2024-11-10T13:02:44,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:02:44,732 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34104, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:02:44,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,44609,1731243763869 2024-11-10T13:02:44,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:02:44,736 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:02:44,736 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-10T13:02:44,737 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3857ccc89b65,44609,1731243763869 2024-11-10T13:02:44,737 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69624f08 2024-11-10T13:02:44,737 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-10T13:02:44,738 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34850, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-10T13:02:44,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-10T13:02:44,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-10T13:02:44,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:02:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-10T13:02:44,741 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-10T13:02:44,741 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:44,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-10T13:02:44,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:02:44,742 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-10T13:02:44,748 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741835_1011 (size=381) 2024-11-10T13:02:44,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741835_1011 (size=381) 2024-11-10T13:02:44,750 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 098c1ce7807e5e33f69801e44af102e7, NAME => 'TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9 2024-11-10T13:02:44,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741836_1012 (size=64) 2024-11-10T13:02:44,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741836_1012 (size=64) 2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 098c1ce7807e5e33f69801e44af102e7, disabling compactions & flushes 2024-11-10T13:02:44,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. after waiting 0 ms 2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:44,757 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 
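[editor's aside, not part of the test output] The create request logged above carries a descriptor with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536) plus the deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) that TableDescriptorChecker warns about, so the test can force splits and flushes quickly. A sketch of building an equivalent descriptor with the public client API (illustrative, not the test's own code) is:

    // Sketch: a descriptor equivalent to the one in the logged create request.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
        public static void createTable(Admin admin) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)                 // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                    .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                    .build())
                .setMaxFileSize(786432)                // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192)            // triggers the MEMSTORE_FLUSHSIZE warning above
                .build();
            admin.createTable(td);                     // surfaces as CreateTableProcedure pid=4
        }
    }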
2024-11-10T13:02:44,757 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 098c1ce7807e5e33f69801e44af102e7: Waiting for close lock at 1731243764757Disabling compacts and flushes for region at 1731243764757Disabling writes for close at 1731243764757Writing region close event to WAL at 1731243764757Closed at 1731243764757 2024-11-10T13:02:44,758 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-10T13:02:44,759 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731243764758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243764758"}]},"ts":"1731243764758"} 2024-11-10T13:02:44,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-10T13:02:44,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-10T13:02:44,762 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243764762"}]},"ts":"1731243764762"} 2024-11-10T13:02:44,764 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-10T13:02:44,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, ASSIGN}] 2024-11-10T13:02:44,765 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, ASSIGN 2024-11-10T13:02:44,767 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, ASSIGN; state=OFFLINE, location=3857ccc89b65,42123,1731243763912; forceNewPlan=false, retain=false 2024-11-10T13:02:44,917 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=098c1ce7807e5e33f69801e44af102e7, regionState=OPENING, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:44,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, ASSIGN because future has completed 2024-11-10T13:02:44,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 098c1ce7807e5e33f69801e44af102e7, 
server=3857ccc89b65,42123,1731243763912}] 2024-11-10T13:02:45,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:45,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:45,076 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:45,076 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 098c1ce7807e5e33f69801e44af102e7, NAME => 'TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:02:45,077 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,077 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:02:45,077 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,077 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,078 INFO [StoreOpener-098c1ce7807e5e33f69801e44af102e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,079 INFO [StoreOpener-098c1ce7807e5e33f69801e44af102e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 098c1ce7807e5e33f69801e44af102e7 columnFamilyName info 2024-11-10T13:02:45,079 DEBUG [StoreOpener-098c1ce7807e5e33f69801e44af102e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:02:45,080 INFO [StoreOpener-098c1ce7807e5e33f69801e44af102e7-1 {}] regionserver.HStore(327): Store=098c1ce7807e5e33f69801e44af102e7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:02:45,080 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,081 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,081 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,081 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,081 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,083 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,085 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:02:45,085 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 098c1ce7807e5e33f69801e44af102e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824827, jitterRate=0.04882213473320007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:02:45,085 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:45,086 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 098c1ce7807e5e33f69801e44af102e7: Running coprocessor pre-open hook at 1731243765077Writing 
region info on filesystem at 1731243765077Initializing all the Stores at 1731243765078 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243765078Cleaning up temporary data from old regions at 1731243765081 (+3 ms)Running coprocessor post-open hooks at 1731243765085 (+4 ms)Region opened successfully at 1731243765085 2024-11-10T13:02:45,087 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., pid=6, masterSystemTime=1731243765073 2024-11-10T13:02:45,089 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:45,089 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:45,089 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=098c1ce7807e5e33f69801e44af102e7, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:02:45,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 because future has completed 2024-11-10T13:02:45,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-10T13:02:45,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 in 173 msec 2024-11-10T13:02:45,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-10T13:02:45,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, ASSIGN in 331 msec 2024-11-10T13:02:45,098 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-10T13:02:45,099 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731243765098"}]},"ts":"1731243765098"} 2024-11-10T13:02:45,100 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-10T13:02:45,101 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-10T13:02:45,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 362 msec 2024-11-10T13:02:46,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:46,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:47,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:47,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:47,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:47,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:48,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:48,208 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:02:48,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:48,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:02:49,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:49,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:50,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:50,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:50,152 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T13:02:50,152 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-10T13:02:51,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:51,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:52,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:52,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:52,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-10T13:02:52,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-10T13:02:52,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-10T13:02:53,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:53,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:54,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:54,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:54,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44609 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-10T13:02:54,833 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-10T13:02:54,833 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-10T13:02:54,836 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-10T13:02:54,836 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:02:54,838 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2] 2024-11-10T13:02:54,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:02:54,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T13:02:54,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d9bcf1044a7446ea8d4d1e513b7b093c is 1080, key is row0001/info:/1731243774839/Put/seqid=0 2024-11-10T13:02:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741837_1013 (size=12509) 2024-11-10T13:02:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741837_1013 (size=12509) 2024-11-10T13:02:54,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d9bcf1044a7446ea8d4d1e513b7b093c 2024-11-10T13:02:54,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d9bcf1044a7446ea8d4d1e513b7b093c as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c 2024-11-10T13:02:54,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c, entries=7, sequenceid=11, filesize=12.2 K 
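The repeated "Failed invocation ... Filesystem closed" stack traces above all come from the same retry loop: a Close-WAL-Writer thread keeps probing lease recovery (RecoverLeaseFSUtils.recoverFileLease, which reflectively calls isFileClosed) on WAL files of an earlier cluster instance under hdfs://localhost:35903, and every probe fails because that instance's DFSClient appears to have already been closed. The stand-alone sketch below shows the same poll-until-closed pattern against HDFS using only the public DistributedFileSystem API; the command-line path, one-second sleep, and printed messages are illustrative assumptions, not code taken from this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Poll recoverLease/isFileClosed until the NameNode reports the file closed,
  // the same pattern the RecoverLeaseFSUtils warnings above are retrying.
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]); // placeholder: pass the WAL path to probe
    try (FileSystem fs = wal.getFileSystem(conf)) {
      if (!(fs instanceof DistributedFileSystem)) {
        System.out.println("Lease recovery only applies to HDFS; nothing to do.");
        return;
      }
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      boolean closed = dfs.recoverLease(wal);
      while (!closed) {
        try {
          closed = dfs.isFileClosed(wal);
        } catch (IOException e) {
          // "Filesystem closed" here means the DFS client was shut down underneath us,
          // which is what the warnings in this log report on every attempt.
          System.out.println("isFileClosed failed: " + e.getMessage());
          break;
        }
        if (!closed) {
          Thread.sleep(1000L); // retry roughly once per second, as the timestamps above suggest
        }
      }
      System.out.println("file closed: " + closed);
    }
  }
}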
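Right after this flush commits, the entries below show the handlers rejecting further puts: the memstore of region 098c1ce7807e5e33f69801e44af102e7 is over its 32.0 K blocking limit, RSRpcServices surfaces RegionTooBusyException, and the async region locator decides the error does not invalidate its cached location. The HBase client already applies its own internal retry handling to this kind of backpressure; the sketch below is only a hedged illustration of making the backoff explicit with the public client API, reusing the table name, column family, and row key from this log (the qualifier, payload, and retry policy are assumptions).

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      Put put = new Put(Bytes.toBytes("row0001"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]); // qualifier/payload are illustrative
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // The server reported the region is over its memstore limit; back off and retry.
          // (The stock client may already have retried internally before this surfaces.)
          System.out.println("attempt " + attempt + " rejected: " + e.getMessage());
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}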
2024-11-10T13:02:54,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T13:02:54,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 098c1ce7807e5e33f69801e44af102e7 in 37ms, sequenceid=11, compaction requested=false
2024-11-10T13:02:54,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7:
2024-11-10T13:02:54,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34104 deadline: 1731243784886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912
2024-11-10T13:02:54,910 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-10T13:02:54,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-10T13:02:54,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 because the exception is null or not the one we care about
2024-11-10T13:02:55,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:55,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:56,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:56,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:57,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:57,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-10T13:02:57,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-10T13:02:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:57,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-10T13:02:58,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:58,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:02:59,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:02:59,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:00,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:00,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:01,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:01,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:02,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:02,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:03,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:03,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:04,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:04,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-10T13:03:04,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7
2024-11-10T13:03:04,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-10T13:03:04,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/29c2a44adebe4e1ba214c484894faa33 is 1080, key is row0008/info:/1731243774851/Put/seqid=0
2024-11-10T13:03:04,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741838_1014 (size=29761)
2024-11-10T13:03:04,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741838_1014 (size=29761)
2024-11-10T13:03:04,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/29c2a44adebe4e1ba214c484894faa33
2024-11-10T13:03:04,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/29c2a44adebe4e1ba214c484894faa33 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33
2024-11-10T13:03:04,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33, entries=23, sequenceid=37, filesize=29.1 K
2024-11-10T13:03:04,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 098c1ce7807e5e33f69801e44af102e7 in 21ms, sequenceid=37, compaction requested=false
2024-11-10T13:03:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7:
2024-11-10T13:03:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-11-10T13:03:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-10T13:03:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33 because midkey is the same as first or last row
2024-11-10T13:03:05,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:05,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:06,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:06,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:06,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-10T13:03:06,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/875508865d1b453b8e6837d52c13eeb3 is 1080, key is row0031/info:/1731243784956/Put/seqid=0 2024-11-10T13:03:06,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741839_1015 (size=12509) 2024-11-10T13:03:06,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741839_1015 (size=12509) 2024-11-10T13:03:06,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/875508865d1b453b8e6837d52c13eeb3 2024-11-10T13:03:06,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/875508865d1b453b8e6837d52c13eeb3 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3 2024-11-10T13:03:06,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3, entries=7, sequenceid=47, filesize=12.2 K 2024-11-10T13:03:06,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 098c1ce7807e5e33f69801e44af102e7 in 22ms, sequenceid=47, compaction requested=true 2024-11-10T13:03:06,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-10T13:03:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33 because midkey is the same as first or last row 2024-11-10T13:03:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 098c1ce7807e5e33f69801e44af102e7:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-10T13:03:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:06,991 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:03:06,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:06,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-10T13:03:06,992 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:03:06,993 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 098c1ce7807e5e33f69801e44af102e7/info is initiating minor compaction (all files) 2024-11-10T13:03:06,993 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 098c1ce7807e5e33f69801e44af102e7/info in TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:06,993 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp, totalSize=53.5 K 2024-11-10T13:03:06,993 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9bcf1044a7446ea8d4d1e513b7b093c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731243774839 2024-11-10T13:03:06,994 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29c2a44adebe4e1ba214c484894faa33, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731243774851 2024-11-10T13:03:06,994 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 875508865d1b453b8e6837d52c13eeb3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731243784956 2024-11-10T13:03:06,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/f80361833d1046ec8bf5c884617534f3 is 1080, key is row0038/info:/1731243786969/Put/seqid=0 
2024-11-10T13:03:07,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741840_1016 (size=21141) 2024-11-10T13:03:07,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741840_1016 (size=21141) 2024-11-10T13:03:07,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/f80361833d1046ec8bf5c884617534f3 2024-11-10T13:03:07,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/f80361833d1046ec8bf5c884617534f3 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3 2024-11-10T13:03:07,009 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 098c1ce7807e5e33f69801e44af102e7#info#compaction#60 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:07,010 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/1264052c9c204d4986db566f58422b40 is 1080, key is row0001/info:/1731243774839/Put/seqid=0 2024-11-10T13:03:07,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3, entries=15, sequenceid=65, filesize=20.6 K 2024-11-10T13:03:07,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 098c1ce7807e5e33f69801e44af102e7 in 24ms, sequenceid=65, compaction requested=false 2024-11-10T13:03:07,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:07,015 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.1 K, sizeToCheck=16.0 K 2024-11-10T13:03:07,015 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:07,015 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33 because midkey is the same as first or last row 2024-11-10T13:03:07,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:07,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:07,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741841_1017 (size=44978) 2024-11-10T13:03:07,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741841_1017 (size=44978) 2024-11-10T13:03:07,023 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/1264052c9c204d4986db566f58422b40 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 2024-11-10T13:03:07,029 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 098c1ce7807e5e33f69801e44af102e7/info of 098c1ce7807e5e33f69801e44af102e7 into 1264052c9c204d4986db566f58422b40(size=43.9 K), total size for store is 64.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-10T13:03:07,029 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:07,029 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., storeName=098c1ce7807e5e33f69801e44af102e7/info, priority=13, startTime=1731243786991; duration=0sec 2024-11-10T13:03:07,029 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-10T13:03:07,029 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:07,029 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.6 K, sizeToCheck=16.0 K 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:07,030 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 098c1ce7807e5e33f69801e44af102e7:info 2024-11-10T13:03:08,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:08,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-10T13:03:09,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:09,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:09,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/08298269b8ae452da4f246c3a6bc27f0 is 1080, key is row0053/info:/1731243786993/Put/seqid=0 2024-11-10T13:03:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741842_1018 (size=18987) 2024-11-10T13:03:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741842_1018 (size=18987) 2024-11-10T13:03:09,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/08298269b8ae452da4f246c3a6bc27f0 2024-11-10T13:03:09,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/08298269b8ae452da4f246c3a6bc27f0 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0 2024-11-10T13:03:09,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0, entries=13, sequenceid=82, filesize=18.5 K 2024-11-10T13:03:09,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 098c1ce7807e5e33f69801e44af102e7 in 23ms, sequenceid=82, compaction requested=true 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 098c1ce7807e5e33f69801e44af102e7:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:03:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:09,039 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:03:09,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-10T13:03:09,041 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:03:09,041 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 098c1ce7807e5e33f69801e44af102e7/info is initiating minor compaction (all files) 2024-11-10T13:03:09,041 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 098c1ce7807e5e33f69801e44af102e7/info in TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:09,041 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp, totalSize=83.1 K 2024-11-10T13:03:09,042 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1264052c9c204d4986db566f58422b40, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731243774839 2024-11-10T13:03:09,042 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting f80361833d1046ec8bf5c884617534f3, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=65, earliestPutTs=1731243786969 2024-11-10T13:03:09,043 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08298269b8ae452da4f246c3a6bc27f0, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731243786993 2024-11-10T13:03:09,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 is 1080, key is row0066/info:/1731243789017/Put/seqid=0 2024-11-10T13:03:09,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741843_1019 (size=20064) 2024-11-10T13:03:09,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added 
to blk_1073741843_1019 (size=20064) 2024-11-10T13:03:09,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 2024-11-10T13:03:09,057 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 098c1ce7807e5e33f69801e44af102e7#info#compaction#63 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:09,057 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/92b9d02f320d4ebfa942897275801c45 is 1080, key is row0001/info:/1731243774839/Put/seqid=0 2024-11-10T13:03:09,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 2024-11-10T13:03:09,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741844_1020 (size=75378) 2024-11-10T13:03:09,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741844_1020 (size=75378) 2024-11-10T13:03:09,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/5db1403c2ca84c3cb8c1c27e49dbcfa0, entries=14, sequenceid=99, filesize=19.6 K 2024-11-10T13:03:09,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 098c1ce7807e5e33f69801e44af102e7 in 25ms, sequenceid=99, compaction requested=false 2024-11-10T13:03:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:09,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 
098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-10T13:03:09,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/47ccaeb5b6884f12aeaacb38b6376da0 is 1080, key is row0080/info:/1731243789042/Put/seqid=0 2024-11-10T13:03:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741845_1021 (size=20064) 2024-11-10T13:03:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741845_1021 (size=20064) 2024-11-10T13:03:09,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/47ccaeb5b6884f12aeaacb38b6376da0 2024-11-10T13:03:09,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/47ccaeb5b6884f12aeaacb38b6376da0 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/47ccaeb5b6884f12aeaacb38b6376da0 2024-11-10T13:03:09,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/47ccaeb5b6884f12aeaacb38b6376da0, entries=14, sequenceid=116, filesize=19.6 K 2024-11-10T13:03:09,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 098c1ce7807e5e33f69801e44af102e7 in 18ms, sequenceid=116, compaction requested=false 2024-11-10T13:03:09,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:09,086 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=122.3 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,086 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,086 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 because midkey is the same as first or last row 2024-11-10T13:03:09,470 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/92b9d02f320d4ebfa942897275801c45 as 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45 2024-11-10T13:03:09,475 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 098c1ce7807e5e33f69801e44af102e7/info of 098c1ce7807e5e33f69801e44af102e7 into 92b9d02f320d4ebfa942897275801c45(size=73.6 K), total size for store is 112.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:03:09,475 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 098c1ce7807e5e33f69801e44af102e7: 2024-11-10T13:03:09,476 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., storeName=098c1ce7807e5e33f69801e44af102e7/info, priority=13, startTime=1731243789039; duration=0sec 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-10T13:03:09,476 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-10T13:03:09,477 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:09,477 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:09,477 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 098c1ce7807e5e33f69801e44af102e7:info 2024-11-10T13:03:09,479 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44609 {}] assignment.AssignmentManager(1355): Split request from 3857ccc89b65,42123,1731243763912, parent={ENCODED => 098c1ce7807e5e33f69801e44af102e7, NAME => 'TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-10T13:03:09,484 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44609 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3857ccc89b65,42123,1731243763912 2024-11-10T13:03:09,487 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44609 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=098c1ce7807e5e33f69801e44af102e7, daughterA=36a7bbbdc6f9f69fcba5b0d63463cc2e, daughterB=99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:09,488 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=098c1ce7807e5e33f69801e44af102e7, daughterA=36a7bbbdc6f9f69fcba5b0d63463cc2e, daughterB=99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:09,488 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=098c1ce7807e5e33f69801e44af102e7, daughterA=36a7bbbdc6f9f69fcba5b0d63463cc2e, daughterB=99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:09,488 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=098c1ce7807e5e33f69801e44af102e7, daughterA=36a7bbbdc6f9f69fcba5b0d63463cc2e, daughterB=99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:09,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, UNASSIGN}] 2024-11-10T13:03:09,496 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, UNASSIGN 2024-11-10T13:03:09,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=098c1ce7807e5e33f69801e44af102e7, regionState=CLOSING, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:03:09,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, UNASSIGN because future has completed 2024-11-10T13:03:09,500 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-10T13:03:09,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912}] 2024-11-10T13:03:09,657 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,657 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-10T13:03:09,657 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 098c1ce7807e5e33f69801e44af102e7, disabling 
compactions & flushes 2024-11-10T13:03:09,657 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:09,657 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:09,657 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. after waiting 0 ms 2024-11-10T13:03:09,657 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:09,658 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 098c1ce7807e5e33f69801e44af102e7 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-10T13:03:09,662 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d231067444d741cbba56d6c9bbee63bb is 1080, key is row0094/info:/1731243789068/Put/seqid=0 2024-11-10T13:03:09,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741846_1022 (size=8193) 2024-11-10T13:03:09,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741846_1022 (size=8193) 2024-11-10T13:03:09,667 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d231067444d741cbba56d6c9bbee63bb 2024-11-10T13:03:09,672 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/.tmp/info/d231067444d741cbba56d6c9bbee63bb as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d231067444d741cbba56d6c9bbee63bb 2024-11-10T13:03:09,677 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d231067444d741cbba56d6c9bbee63bb, entries=3, sequenceid=123, filesize=8.0 K 2024-11-10T13:03:09,678 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 
{event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 098c1ce7807e5e33f69801e44af102e7 in 20ms, sequenceid=123, compaction requested=true 2024-11-10T13:03:09,678 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0] to archive 2024-11-10T13:03:09,679 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-10T13:03:09,681 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d9bcf1044a7446ea8d4d1e513b7b093c 2024-11-10T13:03:09,682 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/29c2a44adebe4e1ba214c484894faa33 2024-11-10T13:03:09,683 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/1264052c9c204d4986db566f58422b40 2024-11-10T13:03:09,684 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/875508865d1b453b8e6837d52c13eeb3 2024-11-10T13:03:09,685 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/f80361833d1046ec8bf5c884617534f3 2024-11-10T13:03:09,686 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0 to 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/08298269b8ae452da4f246c3a6bc27f0 2024-11-10T13:03:09,691 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-10T13:03:09,692 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 2024-11-10T13:03:09,692 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 098c1ce7807e5e33f69801e44af102e7: Waiting for close lock at 1731243789657Running coprocessor pre-close hooks at 1731243789657Disabling compacts and flushes for region at 1731243789657Disabling writes for close at 1731243789657Obtaining lock to block concurrent updates at 1731243789658 (+1 ms)Preparing flush snapshotting stores in 098c1ce7807e5e33f69801e44af102e7 at 1731243789658Finished memstore snapshotting TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731243789658Flushing stores of TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. at 1731243789659 (+1 ms)Flushing 098c1ce7807e5e33f69801e44af102e7/info: creating writer at 1731243789659Flushing 098c1ce7807e5e33f69801e44af102e7/info: appending metadata at 1731243789661 (+2 ms)Flushing 098c1ce7807e5e33f69801e44af102e7/info: closing flushed file at 1731243789661Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42b84df4: reopening flushed file at 1731243789671 (+10 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 098c1ce7807e5e33f69801e44af102e7 in 20ms, sequenceid=123, compaction requested=true at 1731243789678 (+7 ms)Writing region close event to WAL at 1731243789688 (+10 ms)Running coprocessor post-close hooks at 1731243789692 (+4 ms)Closed at 1731243789692 2024-11-10T13:03:09,694 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,695 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=098c1ce7807e5e33f69801e44af102e7, regionState=CLOSED 2024-11-10T13:03:09,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 because future has completed 2024-11-10T13:03:09,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-10T13:03:09,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 098c1ce7807e5e33f69801e44af102e7, server=3857ccc89b65,42123,1731243763912 in 198 msec 2024-11-10T13:03:09,702 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-10T13:03:09,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=098c1ce7807e5e33f69801e44af102e7, UNASSIGN in 204 msec 2024-11-10T13:03:09,708 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:09,712 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=098c1ce7807e5e33f69801e44af102e7, threads=4 2024-11-10T13:03:09,714 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,714 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,714 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/47ccaeb5b6884f12aeaacb38b6376da0 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,714 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d231067444d741cbba56d6c9bbee63bb for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,724 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d231067444d741cbba56d6c9bbee63bb, top=true 2024-11-10T13:03:09,725 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/47ccaeb5b6884f12aeaacb38b6376da0, top=true 2024-11-10T13:03:09,725 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/5db1403c2ca84c3cb8c1c27e49dbcfa0, top=true 2024-11-10T13:03:09,732 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb for child: 99786fe932a03a1a763ac98ba88ea15a, parent: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,732 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0 for child: 99786fe932a03a1a763ac98ba88ea15a, parent: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,733 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/d231067444d741cbba56d6c9bbee63bb for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741847_1023 (size=27) 2024-11-10T13:03:09,733 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/5db1403c2ca84c3cb8c1c27e49dbcfa0 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741847_1023 (size=27) 2024-11-10T13:03:09,736 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0 for child: 99786fe932a03a1a763ac98ba88ea15a, parent: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,736 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/47ccaeb5b6884f12aeaacb38b6376da0 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741848_1024 (size=27) 2024-11-10T13:03:09,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741848_1024 (size=27) 2024-11-10T13:03:09,743 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45 for region: 098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:09,745 DEBUG [PEWorker-4 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 098c1ce7807e5e33f69801e44af102e7 Daughter A: [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7] storefiles, Daughter B: [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb] storefiles. 2024-11-10T13:03:09,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741849_1025 (size=71) 2024-11-10T13:03:09,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741849_1025 (size=71) 2024-11-10T13:03:09,754 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:09,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741850_1026 (size=71) 2024-11-10T13:03:09,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741850_1026 (size=71) 2024-11-10T13:03:09,766 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:09,775 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-10T13:03:09,776 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-10T13:03:09,779 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731243789778"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731243789778"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731243789778"}]},"ts":"1731243789778"} 2024-11-10T13:03:09,779 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731243789778"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243789778"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731243789778"}]},"ts":"1731243789778"} 2024-11-10T13:03:09,779 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731243789778"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731243789778"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731243789778"}]},"ts":"1731243789778"} 2024-11-10T13:03:09,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36a7bbbdc6f9f69fcba5b0d63463cc2e, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=99786fe932a03a1a763ac98ba88ea15a, ASSIGN}] 2024-11-10T13:03:09,797 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36a7bbbdc6f9f69fcba5b0d63463cc2e, ASSIGN 2024-11-10T13:03:09,797 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=99786fe932a03a1a763ac98ba88ea15a, ASSIGN 2024-11-10T13:03:09,798 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36a7bbbdc6f9f69fcba5b0d63463cc2e, ASSIGN; state=SPLITTING_NEW, location=3857ccc89b65,42123,1731243763912; forceNewPlan=false, retain=false 2024-11-10T13:03:09,798 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=99786fe932a03a1a763ac98ba88ea15a, ASSIGN; state=SPLITTING_NEW, location=3857ccc89b65,42123,1731243763912; forceNewPlan=false, retain=false 2024-11-10T13:03:09,948 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=99786fe932a03a1a763ac98ba88ea15a, regionState=OPENING, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:03:09,948 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=36a7bbbdc6f9f69fcba5b0d63463cc2e, regionState=OPENING, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:03:09,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=99786fe932a03a1a763ac98ba88ea15a, ASSIGN because future has completed 2024-11-10T13:03:09,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99786fe932a03a1a763ac98ba88ea15a, server=3857ccc89b65,42123,1731243763912}] 2024-11-10T13:03:09,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36a7bbbdc6f9f69fcba5b0d63463cc2e, ASSIGN because future has completed 2024-11-10T13:03:09,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36a7bbbdc6f9f69fcba5b0d63463cc2e, server=3857ccc89b65,42123,1731243763912}] 2024-11-10T13:03:10,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:10,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:10,107 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 
2024-11-10T13:03:10,107 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 36a7bbbdc6f9f69fcba5b0d63463cc2e, NAME => 'TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-10T13:03:10,108 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,108 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:10,108 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,108 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,109 INFO [StoreOpener-36a7bbbdc6f9f69fcba5b0d63463cc2e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,110 INFO [StoreOpener-36a7bbbdc6f9f69fcba5b0d63463cc2e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36a7bbbdc6f9f69fcba5b0d63463cc2e columnFamilyName info 2024-11-10T13:03:10,110 DEBUG [StoreOpener-36a7bbbdc6f9f69fcba5b0d63463cc2e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:10,123 DEBUG [StoreOpener-36a7bbbdc6f9f69fcba5b0d63463cc2e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-bottom 2024-11-10T13:03:10,123 INFO [StoreOpener-36a7bbbdc6f9f69fcba5b0d63463cc2e-1 {}] regionserver.HStore(327): Store=36a7bbbdc6f9f69fcba5b0d63463cc2e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:03:10,124 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,124 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,125 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,126 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,126 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,127 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,128 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 36a7bbbdc6f9f69fcba5b0d63463cc2e; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721532, jitterRate=-0.08252555131912231}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T13:03:10,128 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:10,128 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 36a7bbbdc6f9f69fcba5b0d63463cc2e: Running coprocessor pre-open hook at 1731243790108Writing region info on filesystem at 1731243790108Initializing all the Stores at 1731243790109 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243790109Cleaning up temporary data from old regions at 1731243790126 (+17 ms)Running coprocessor post-open hooks at 1731243790128 (+2 ms)Region opened successfully at 1731243790128 2024-11-10T13:03:10,129 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e., pid=13, masterSystemTime=1731243790104 2024-11-10T13:03:10,129 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
36a7bbbdc6f9f69fcba5b0d63463cc2e:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:03:10,129 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:10,129 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-10T13:03:10,130 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:10,130 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 36a7bbbdc6f9f69fcba5b0d63463cc2e/info is initiating minor compaction (all files) 2024-11-10T13:03:10,130 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 36a7bbbdc6f9f69fcba5b0d63463cc2e/info in TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:10,130 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-bottom] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/.tmp, totalSize=73.6 K 2024-11-10T13:03:10,131 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731243774839 2024-11-10T13:03:10,131 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:10,131 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:10,132 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 
2024-11-10T13:03:10,132 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 99786fe932a03a1a763ac98ba88ea15a, NAME => 'TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-10T13:03:10,132 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,132 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:10,132 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,132 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,132 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=36a7bbbdc6f9f69fcba5b0d63463cc2e, regionState=OPEN, openSeqNum=127, regionLocation=3857ccc89b65,42123,1731243763912 2024-11-10T13:03:10,133 INFO [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,134 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-10T13:03:10,134 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-10T13:03:10,134 INFO [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99786fe932a03a1a763ac98ba88ea15a columnFamilyName info 2024-11-10T13:03:10,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-10T13:03:10,134 DEBUG [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:10,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36a7bbbdc6f9f69fcba5b0d63463cc2e, server=3857ccc89b65,42123,1731243763912 because future has completed 2024-11-10T13:03:10,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-10T13:03:10,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 36a7bbbdc6f9f69fcba5b0d63463cc2e, server=3857ccc89b65,42123,1731243763912 in 184 msec 2024-11-10T13:03:10,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36a7bbbdc6f9f69fcba5b0d63463cc2e, ASSIGN in 344 msec 2024-11-10T13:03:10,145 DEBUG [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-top 2024-11-10T13:03:10,149 DEBUG [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0 2024-11-10T13:03:10,151 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36a7bbbdc6f9f69fcba5b0d63463cc2e#info#compaction#66 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:10,151 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/.tmp/info/fe162d4a94f74d2c838caad970c9e116 is 1080, key is row0001/info:/1731243774839/Put/seqid=0 2024-11-10T13:03:10,153 DEBUG [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0 2024-11-10T13:03:10,157 DEBUG [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb 2024-11-10T13:03:10,158 INFO [StoreOpener-99786fe932a03a1a763ac98ba88ea15a-1 {}] regionserver.HStore(327): Store=99786fe932a03a1a763ac98ba88ea15a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:03:10,158 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/f15922376ce84565abee8bba646d7ffb is 193, key is TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a./info:regioninfo/1731243789948/Put/seqid=0 2024-11-10T13:03:10,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741851_1027 (size=70862) 2024-11-10T13:03:10,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741851_1027 (size=70862) 2024-11-10T13:03:10,159 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,160 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,161 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:10,161 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 99786fe932a03a1a763ac98ba88ea15a 
2024-11-10T13:03:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741852_1028 (size=9847)
2024-11-10T13:03:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741852_1028 (size=9847)
2024-11-10T13:03:10,164 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:10,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/f15922376ce84565abee8bba646d7ffb
2024-11-10T13:03:10,165 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 99786fe932a03a1a763ac98ba88ea15a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713017, jitterRate=-0.09335207939147949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-10T13:03:10,165 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:10,165 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 99786fe932a03a1a763ac98ba88ea15a: Running coprocessor pre-open hook at 1731243790132Writing region info on filesystem at 1731243790132Initializing all the Stores at 1731243790133 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243790133Cleaning up temporary data from old regions at 1731243790161 (+28 ms)Running coprocessor post-open hooks at 1731243790165 (+4 ms)Region opened successfully at 1731243790165
2024-11-10T13:03:10,166 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., pid=12, masterSystemTime=1731243790104
2024-11-10T13:03:10,166 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/.tmp/info/fe162d4a94f74d2c838caad970c9e116 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/fe162d4a94f74d2c838caad970c9e116
2024-11-10T13:03:10,166 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 2
2024-11-10T13:03:10,166 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:10,166 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-10T13:03:10,169 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:10,169 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files)
2024-11-10T13:03:10,169 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:10,169 DEBUG [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:10,169 INFO [RS_OPEN_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:10,169 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-top, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=120.8 K
2024-11-10T13:03:10,170 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] compactions.Compactor(225): Compacting 92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731243774839
2024-11-10T13:03:10,170 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=99786fe932a03a1a763ac98ba88ea15a, regionState=OPEN, openSeqNum=127, regionLocation=3857ccc89b65,42123,1731243763912
2024-11-10T13:03:10,170 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731243789017
2024-11-10T13:03:10,171 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731243789042
2024-11-10T13:03:10,171 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731243789068
2024-11-10T13:03:10,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99786fe932a03a1a763ac98ba88ea15a, server=3857ccc89b65,42123,1731243763912 because future has completed
2024-11-10T13:03:10,175 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 36a7bbbdc6f9f69fcba5b0d63463cc2e/info of 36a7bbbdc6f9f69fcba5b0d63463cc2e into fe162d4a94f74d2c838caad970c9e116(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-10T13:03:10,175 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 36a7bbbdc6f9f69fcba5b0d63463cc2e: 2024-11-10T13:03:10,175 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e., storeName=36a7bbbdc6f9f69fcba5b0d63463cc2e/info, priority=15, startTime=1731243790129; duration=0sec 2024-11-10T13:03:10,175 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:10,175 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36a7bbbdc6f9f69fcba5b0d63463cc2e:info 2024-11-10T13:03:10,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-10T13:03:10,185 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 99786fe932a03a1a763ac98ba88ea15a, server=3857ccc89b65,42123,1731243763912 in 231 msec 2024-11-10T13:03:10,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/ns/ffde71c8c1f44177be582604bb72ab81 is 43, key is default/ns:d/1731243764692/Put/seqid=0 2024-11-10T13:03:10,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-10T13:03:10,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=99786fe932a03a1a763ac98ba88ea15a, ASSIGN in 390 msec 2024-11-10T13:03:10,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=098c1ce7807e5e33f69801e44af102e7, daughterA=36a7bbbdc6f9f69fcba5b0d63463cc2e, daughterB=99786fe932a03a1a763ac98ba88ea15a in 703 msec 2024-11-10T13:03:10,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741853_1029 (size=5153) 2024-11-10T13:03:10,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741853_1029 (size=5153) 2024-11-10T13:03:10,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/ns/ffde71c8c1f44177be582604bb72ab81 2024-11-10T13:03:10,201 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:10,202 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/73370c015ddf462e91f398b67952e588 is 1080, key is row0062/info:/1731243787009/Put/seqid=0 2024-11-10T13:03:10,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741854_1030 (size=43081) 2024-11-10T13:03:10,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741854_1030 (size=43081) 2024-11-10T13:03:10,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/table/3e51845f3a654252a43201f84d380b42 is 65, key is TestLogRolling-testLogRolling/table:state/1731243765098/Put/seqid=0 2024-11-10T13:03:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741855_1031 (size=5340) 2024-11-10T13:03:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741855_1031 (size=5340) 2024-11-10T13:03:10,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/table/3e51845f3a654252a43201f84d380b42 2024-11-10T13:03:10,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/f15922376ce84565abee8bba646d7ffb as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/info/f15922376ce84565abee8bba646d7ffb 2024-11-10T13:03:10,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/info/f15922376ce84565abee8bba646d7ffb, entries=30, sequenceid=17, filesize=9.6 K 2024-11-10T13:03:10,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/ns/ffde71c8c1f44177be582604bb72ab81 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/ns/ffde71c8c1f44177be582604bb72ab81 2024-11-10T13:03:10,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/ns/ffde71c8c1f44177be582604bb72ab81, entries=2, sequenceid=17, filesize=5.0 K 2024-11-10T13:03:10,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/table/3e51845f3a654252a43201f84d380b42 as 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/table/3e51845f3a654252a43201f84d380b42 2024-11-10T13:03:10,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/table/3e51845f3a654252a43201f84d380b42, entries=2, sequenceid=17, filesize=5.2 K 2024-11-10T13:03:10,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 121ms, sequenceid=17, compaction requested=false 2024-11-10T13:03:10,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T13:03:10,618 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/73370c015ddf462e91f398b67952e588 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/73370c015ddf462e91f398b67952e588 2024-11-10T13:03:10,625 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into 73370c015ddf462e91f398b67952e588(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:03:10,625 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:10,625 INFO [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=12, startTime=1731243790166; duration=0sec 2024-11-10T13:03:10,625 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:10,625 DEBUG [RS:0;3857ccc89b65:42123-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info 2024-11-10T13:03:11,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:11,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34104 deadline: 1731243801074, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. is not online on 3857ccc89b65,42123,1731243763912 2024-11-10T13:03:11,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. is not online on 3857ccc89b65,42123,1731243763912 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T13:03:11,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7. 
is not online on 3857ccc89b65,42123,1731243763912 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-10T13:03:11,075 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731243764738.098c1ce7807e5e33f69801e44af102e7., hostname=3857ccc89b65,42123,1731243763912, seqNum=2 from cache 2024-11-10T13:03:12,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:12,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:13,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:13,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:13,854 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T13:03:14,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:14,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,713 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:14,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:15,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:15,224 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:15,251 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-10T13:03:16,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:16,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:17,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-10T13:03:17,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-10T13:03:18,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:18,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:19,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:19,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:20,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:20,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:21,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:21,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
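The warnings above all come from the same retry loop: RecoverLeaseFSUtils polls, via reflection, whether the old WAL file has been closed, and each probe fails because the DFSClient behind that filesystem handle has already been shut down ("Filesystem closed"), so the check is repeated roughly once per second. A minimal, illustrative Java sketch of such a reflective probe follows (names and error handling are assumptions, not the actual RecoverLeaseFSUtils code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: probes DistributedFileSystem#isFileClosed reflectively,
// the way the isFileClosed frames in the trace above suggest, and treats a failed
// probe (e.g. IOException "Filesystem closed" wrapped in InvocationTargetException)
// as "not known to be closed yet" so the caller can log a warning and retry later.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /** Returns true only when the filesystem positively reports the file as closed. */
  public static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // isFileClosed(Path) is declared on DistributedFileSystem, not on the
      // FileSystem base class, hence the reflective lookup.
      Method probe = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) probe.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // this filesystem implementation has no such probe
    } catch (IllegalAccessException | InvocationTargetException e) {
      return false; // same shape as the warnings above: log and retry on the next pass
    }
  }
}

In this log the probe never succeeds, so the identical warning simply recurs every second.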
2024-11-10T13:03:21,118 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., hostname=3857ccc89b65,42123,1731243763912, seqNum=127]
2024-11-10T13:03:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:21,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-10T13:03:21,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/84e529f3b62c434085e397c897c07023 is 1080, key is row0097/info:/1731243801119/Put/seqid=0
2024-11-10T13:03:21,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741856_1032 (size=12516)
2024-11-10T13:03:21,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741856_1032 (size=12516)
2024-11-10T13:03:21,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/84e529f3b62c434085e397c897c07023
2024-11-10T13:03:21,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/84e529f3b62c434085e397c897c07023 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023
2024-11-10T13:03:21,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023, entries=7, sequenceid=137, filesize=12.2 K
2024-11-10T13:03:21,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 99786fe932a03a1a763ac98ba88ea15a in 41ms, sequenceid=137, compaction requested=false
2024-11-10T13:03:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:21,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:21,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-10T13:03:21,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/b8d7b1204c374ec6887ce056366d5d12 is 1080, key is row0104/info:/1731243801130/Put/seqid=0
2024-11-10T13:03:21,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741857_1033 (size=21156)
2024-11-10T13:03:21,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741857_1033 (size=21156)
2024-11-10T13:03:21,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/b8d7b1204c374ec6887ce056366d5d12
2024-11-10T13:03:21,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/b8d7b1204c374ec6887ce056366d5d12 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12
2024-11-10T13:03:21,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12, entries=15, sequenceid=155, filesize=20.7 K
2024-11-10T13:03:21,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 99786fe932a03a1a763ac98ba88ea15a in 23ms, sequenceid=155, compaction requested=true
2024-11-10T13:03:21,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:21,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T13:03:21,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:21,194 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T13:03:21,195 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T13:03:21,195 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files)
2024-11-10T13:03:21,196 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:21,196 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/73370c015ddf462e91f398b67952e588, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=75.0 K
2024-11-10T13:03:21,196 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73370c015ddf462e91f398b67952e588, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731243787009
2024-11-10T13:03:21,196 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 84e529f3b62c434085e397c897c07023, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731243801119
2024-11-10T13:03:21,197 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting b8d7b1204c374ec6887ce056366d5d12, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731243801130
2024-11-10T13:03:21,209 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#73 average throughput is 29.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T13:03:21,210 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/d465eae96cac4053997f3e87760ce605 is 1080, key is row0062/info:/1731243787009/Put/seqid=0
2024-11-10T13:03:21,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741858_1034 (size=66967)
2024-11-10T13:03:21,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741858_1034 (size=66967)
2024-11-10T13:03:21,221 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/d465eae96cac4053997f3e87760ce605 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/d465eae96cac4053997f3e87760ce605
2024-11-10T13:03:21,227 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into d465eae96cac4053997f3e87760ce605(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-10T13:03:21,227 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:21,227 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243801194; duration=0sec
2024-11-10T13:03:21,227 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:21,227 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info
2024-11-10T13:03:22,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
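The flush-then-compact cycle logged above follows a simple store-file count rule: a flush that leaves fewer than three eligible store files logs "compaction requested=false", while the flush that brings the count to three triggers "Small Compaction requested" and a selection over all three files. A hypothetical helper showing that threshold check follows (the method and the minimum of 3, commonly the default for hbase.hstore.compaction.min, are assumptions for illustration, not the actual HBase policy code):

// Hypothetical helper illustrating the store-file threshold visible in the log:
// flushes below the minimum log "compaction requested=false", and the flush that
// reaches the minimum leads to "Small Compaction requested" plus a selection over
// the eligible (not-already-compacting) files.
public final class CompactionTrigger {

  private CompactionTrigger() {
  }

  public static boolean shouldRequestCompaction(int storeFileCount, int filesCompacting, int minFilesToCompact) {
    int eligible = storeFileCount - filesCompacting;
    return eligible >= minFilesToCompact;
  }

  public static void main(String[] args) {
    // Two store files after the 13:03:21,140 flush -> no compaction requested.
    System.out.println(shouldRequestCompaction(2, 0, 3)); // false
    // Three store files after the 13:03:21,182 flush -> compaction requested.
    System.out.println(shouldRequestCompaction(3, 0, 3)); // true
    // More files but three of them already compacting -> too few eligible.
    System.out.println(shouldRequestCompaction(4, 3, 3)); // false
  }
}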
2024-11-10T13:03:22,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:23,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:23,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:23,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:23,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-10T13:03:23,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/ad3a695504f642e6a06ad38b02d0349d is 1080, key is row0119/info:/1731243801173/Put/seqid=0
2024-11-10T13:03:23,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741859_1035 (size=16828)
2024-11-10T13:03:23,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741859_1035 (size=16828)
2024-11-10T13:03:23,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/ad3a695504f642e6a06ad38b02d0349d
2024-11-10T13:03:23,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/ad3a695504f642e6a06ad38b02d0349d as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d
2024-11-10T13:03:23,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d, entries=11, sequenceid=170, filesize=16.4 K
2024-11-10T13:03:23,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 99786fe932a03a1a763ac98ba88ea15a in 21ms, sequenceid=170, compaction requested=false
2024-11-10T13:03:23,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:23,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:23,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-10T13:03:23,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/eddfa523bfdd4496b2078edb0f7c82fd is 1080, key is row0130/info:/1731243803192/Put/seqid=0
2024-11-10T13:03:23,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741860_1036 (size=20078)
2024-11-10T13:03:23,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741860_1036 (size=20078)
2024-11-10T13:03:23,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/eddfa523bfdd4496b2078edb0f7c82fd
2024-11-10T13:03:23,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/eddfa523bfdd4496b2078edb0f7c82fd as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd
2024-11-10T13:03:23,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd, entries=14, sequenceid=187, filesize=19.6 K
2024-11-10T13:03:23,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 99786fe932a03a1a763ac98ba88ea15a in 24ms, sequenceid=187, compaction requested=true
2024-11-10T13:03:23,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:23,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T13:03:23,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:23,237 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T13:03:23,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:23,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-10T13:03:23,238 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T13:03:23,239 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files)
2024-11-10T13:03:23,239 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:23,239 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/d465eae96cac4053997f3e87760ce605, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=101.4 K
2024-11-10T13:03:23,239 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting d465eae96cac4053997f3e87760ce605, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731243787009
2024-11-10T13:03:23,240 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad3a695504f642e6a06ad38b02d0349d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731243801173
2024-11-10T13:03:23,240 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting eddfa523bfdd4496b2078edb0f7c82fd, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731243803192
2024-11-10T13:03:23,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/195f61ac61a04e9ea5d0f816846ff486 is 1080, key is row0144/info:/1731243803214/Put/seqid=0
2024-11-10T13:03:23,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741861_1037 (size=22238)
2024-11-10T13:03:23,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741861_1037 (size=22238)
2024-11-10T13:03:23,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/195f61ac61a04e9ea5d0f816846ff486
2024-11-10T13:03:23,252 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#77 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T13:03:23,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/195f61ac61a04e9ea5d0f816846ff486 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486
2024-11-10T13:03:23,253 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/b694824dcff74f53adb2603001bb0d22 is 1080, key is row0062/info:/1731243787009/Put/seqid=0
2024-11-10T13:03:23,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741862_1038 (size=94096)
2024-11-10T13:03:23,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741862_1038 (size=94096)
2024-11-10T13:03:23,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486, entries=16, sequenceid=206, filesize=21.7 K
2024-11-10T13:03:23,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for 99786fe932a03a1a763ac98ba88ea15a in 22ms, sequenceid=206, compaction requested=false
2024-11-10T13:03:23,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:23,262 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/b694824dcff74f53adb2603001bb0d22 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b694824dcff74f53adb2603001bb0d22
2024-11-10T13:03:23,267 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into b694824dcff74f53adb2603001bb0d22(size=91.9 K), total size for store is 113.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-10T13:03:23,267 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:23,267 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243803237; duration=0sec
2024-11-10T13:03:23,267 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:23,267 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info
2024-11-10T13:03:24,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
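The steady stream of flushes in this window corresponds to small single-row writes landing in the 'info' family of TestLogRolling-testLogRolling (row keys row0062 through row0160 and cells of roughly 1 KB, judging by the HFileWriterImpl lines). A sketch of a client producing that kind of load with the standard HBase client API follows (not the actual test code; row-key format and value size are inferred from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the kind of client load reflected in the flushes above: ~1 KB values
// written one row at a time to the 'info' family of TestLogRolling-testLogRolling.
public final class WriteLoad {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      byte[] family = Bytes.toBytes("info");
      byte[] value = new byte[1024]; // roughly the 1080-byte cells logged by HFileWriterImpl
      for (int i = 62; i <= 160; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes(""), value); // empty qualifier, as in "row0097/info:/..."
        table.put(put);
      }
    }
  }
}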
2024-11-10T13:03:24,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
2024-11-10T13:03:24,834 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-10T13:03:24,834 INFO [master/3857ccc89b65:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-10T13:03:25,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271
2024-11-10T13:03:25,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
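The repeated "Failed invocation" warnings above come from the WAL-close path polling whether the old log file is closed yet; the check is made through reflection, and because the backing DFS client has already been shut down, every attempt surfaces as an InvocationTargetException wrapping IOException("Filesystem closed") and is retried about once a second. A minimal, self-contained sketch of that reflective probe-and-retry pattern (the ClosedFs class, the method lookup and the path below are illustrative stand-ins, not the HBase or HDFS code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class IsFileClosedPollerSketch {
    // Hypothetical stand-in for a filesystem whose client has already been closed.
    static class ClosedFs {
        public boolean isFileClosed(String path) throws java.io.IOException {
            throw new java.io.IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        String wal = "/WALs/example-wal";  // placeholder, not the path from the log
        for (int attempt = 0; attempt < 3; attempt++) {
            try {
                if ((Boolean) isFileClosed.invoke(fs, wal)) {
                    break;  // lease recovery can stop once the file is reported closed
                }
            } catch (InvocationTargetException e) {
                // The real failure is the wrapped cause, just as in the WARN traces above.
                System.out.println("Failed invocation for " + wal + ": " + e.getCause());
            }
            Thread.sleep(1000);  // the log shows roughly one retry per second
        }
    }
}

Unwrapping getCause() is what turns the unhelpful "InvocationTargetException: null" headline into the actual "Filesystem closed" failure shown in each trace.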
2024-11-10T13:03:25,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:25,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-10T13:03:25,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/dad4053dd8294c9ba92430e50ec0f432 is 1080, key is row0160/info:/1731243803239/Put/seqid=0
2024-11-10T13:03:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741863_1039 (size=12516)
2024-11-10T13:03:25,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741863_1039 (size=12516)
2024-11-10T13:03:25,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/dad4053dd8294c9ba92430e50ec0f432
2024-11-10T13:03:25,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/dad4053dd8294c9ba92430e50ec0f432 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432
2024-11-10T13:03:25,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432, entries=7, sequenceid=217, filesize=12.2 K
2024-11-10T13:03:25,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 99786fe932a03a1a763ac98ba88ea15a in 20ms, sequenceid=217, compaction requested=true
2024-11-10T13:03:25,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:25,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:25,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T13:03:25,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:25,269 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T13:03:25,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-10T13:03:25,271 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128850 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T13:03:25,271 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files)
2024-11-10T13:03:25,271 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:25,271 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b694824dcff74f53adb2603001bb0d22, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=125.8 K
2024-11-10T13:03:25,271 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting b694824dcff74f53adb2603001bb0d22, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731243787009
2024-11-10T13:03:25,272 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 195f61ac61a04e9ea5d0f816846ff486, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1731243803214
2024-11-10T13:03:25,272 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting dad4053dd8294c9ba92430e50ec0f432, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731243803239
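The selection above pulled three store files whose sizes (about 91.9 K, 21.7 K and 12.2 K) add up to the reported 128850 bytes. A rough sketch of that kind of candidate search, reduced to "among contiguous runs of at least minFiles store files, prefer the run with the most files, then the smallest total size" (the real exploring policy also applies ratio and file-count limits that this sketch leaves out):

public class CompactionSelectionSketch {
    // Enumerate contiguous runs of store files (oldest first) and keep the run with the
    // most files, breaking ties by the smaller total byte size. This is a simplified
    // stand-in for the selection step the log reports, not the actual HBase policy.
    static int[] pickRun(long[] sizes, int minFiles) {
        int bestStart = -1, bestLen = 0;
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < sizes.length; start++) {
            for (int end = start + minFiles; end <= sizes.length; end++) {
                long total = 0;
                for (int i = start; i < end; i++) total += sizes[i];
                int len = end - start;
                if (len > bestLen || (len == bestLen && total < bestTotal)) {
                    bestStart = start; bestLen = len; bestTotal = total;
                }
            }
        }
        return bestStart < 0 ? new int[0] : new int[] {bestStart, bestLen, (int) Math.min(bestTotal, Integer.MAX_VALUE)};
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the three files above; they sum to the logged 128850.
        long[] sizes = {94_112L, 22_222L, 12_516L};
        int[] pick = pickRun(sizes, 3);
        System.out.println("start=" + pick[0] + " files=" + pick[1] + " totalSize=" + pick[2]);
    }
}

On those three sizes it prints start=0 files=3 totalSize=128850, in line with the single permutation the log says it considered.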
2024-11-10T13:03:25,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/6f30b96f027948cf991f22bfeabbb58a is 1080, key is row0167/info:/1731243805249/Put/seqid=0
2024-11-10T13:03:25,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741864_1040 (size=20078)
2024-11-10T13:03:25,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741864_1040 (size=20078)
2024-11-10T13:03:25,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/6f30b96f027948cf991f22bfeabbb58a
2024-11-10T13:03:25,283 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#80 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T13:03:25,284 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/ec6c4920b87c496fa642cacd37048768 is 1080, key is row0062/info:/1731243787009/Put/seqid=0
2024-11-10T13:03:25,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/6f30b96f027948cf991f22bfeabbb58a as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a
2024-11-10T13:03:25,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741865_1041 (size=118996)
2024-11-10T13:03:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741865_1041 (size=118996)
2024-11-10T13:03:25,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a, entries=14, sequenceid=234, filesize=19.6 K
2024-11-10T13:03:25,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 99786fe932a03a1a763ac98ba88ea15a in 23ms, sequenceid=234, compaction requested=false
2024-11-10T13:03:25,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:25,293 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/ec6c4920b87c496fa642cacd37048768 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ec6c4920b87c496fa642cacd37048768
2024-11-10T13:03:25,298 INFO
[RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into ec6c4920b87c496fa642cacd37048768(size=116.2 K), total size for store is 135.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:03:25,298 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:25,298 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243805269; duration=0sec 2024-11-10T13:03:25,298 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:25,298 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info 2024-11-10T13:03:26,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:26,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:27,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:27,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-10T13:03:27,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:27,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-10T13:03:27,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/e305920fe202488f9bbabb87423c50d2 is 1080, key is row0181/info:/1731243805270/Put/seqid=0
2024-11-10T13:03:27,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741866_1042 (size=19000)
2024-11-10T13:03:27,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741866_1042 (size=19000)
2024-11-10T13:03:27,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/e305920fe202488f9bbabb87423c50d2
2024-11-10T13:03:27,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/e305920fe202488f9bbabb87423c50d2 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2
2024-11-10T13:03:27,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2, entries=13, sequenceid=251, filesize=18.6 K
2024-11-10T13:03:27,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for 99786fe932a03a1a763ac98ba88ea15a in 21ms, sequenceid=251, compaction requested=true
2024-11-10T13:03:27,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
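Each flush episode in this log has the same shape: edits accumulate in an in-memory store, a flush is requested once the region decides enough data is buffered, the snapshot is written out under a new sequence id, and the in-memory store is reset. A small, self-contained sketch of that accumulate-then-flush bookkeeping (the class name, the 14 KB trigger and the row/column naming are illustrative choices, not HBase defaults; in the log the trigger comes from the region server's own flush policy):

import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

public class MemStoreSketch {
    private final TreeMap<String, byte[]> active = new TreeMap<>();
    private final AtomicLong sequenceId = new AtomicLong();
    private long dataSize;
    private final long flushThreshold;

    MemStoreSketch(long flushThreshold) { this.flushThreshold = flushThreshold; }

    void put(String rowCol, byte[] value) {
        active.put(rowCol, value);
        dataSize += rowCol.length() + value.length;
        sequenceId.incrementAndGet();
        if (dataSize >= flushThreshold) flush();
    }

    // Snapshot the active set, "write" it out under the current sequence id, then reset.
    private void flush() {
        TreeMap<String, byte[]> snapshot = new TreeMap<>(active);
        System.out.printf("flushing %d cells, dataSize=%d, seqid=%d%n",
                snapshot.size(), dataSize, sequenceId.get());
        active.clear();
        dataSize = 0;
    }

    public static void main(String[] args) {
        MemStoreSketch m = new MemStoreSketch(14_000);   // roughly the flush sizes seen above
        for (int i = 0; i < 200; i++) m.put("row" + i + "/info", new byte[1_000]);
    }
}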
2024-11-10T13:03:27,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1
2024-11-10T13:03:27,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:27,316 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-10T13:03:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:27,317 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158074 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-10T13:03:27,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-10T13:03:27,317 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files)
2024-11-10T13:03:27,317 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.
2024-11-10T13:03:27,317 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ec6c4920b87c496fa642cacd37048768, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=154.4 K
2024-11-10T13:03:27,318 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec6c4920b87c496fa642cacd37048768, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731243787009
2024-11-10T13:03:27,318 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6f30b96f027948cf991f22bfeabbb58a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731243805249
2024-11-10T13:03:27,318 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting e305920fe202488f9bbabb87423c50d2, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731243805270
2024-11-10T13:03:27,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/9a88b1be1fb944f9bd7776b4e1b715ac is 1080, key is row0194/info:/1731243807295/Put/seqid=0
2024-11-10T13:03:27,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741867_1043 (size=21168)
2024-11-10T13:03:27,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741867_1043 (size=21168)
2024-11-10T13:03:27,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/9a88b1be1fb944f9bd7776b4e1b715ac
2024-11-10T13:03:27,332 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#83 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-10T13:03:27,333 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/62235200d984403fa4a49ca41662ed17 is 1080, key is row0062/info:/1731243787009/Put/seqid=0
2024-11-10T13:03:27,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/9a88b1be1fb944f9bd7776b4e1b715ac as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac
2024-11-10T13:03:27,341 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac, entries=15, sequenceid=269, filesize=20.7 K
2024-11-10T13:03:27,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741868_1044 (size=148409)
2024-11-10T13:03:27,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741868_1044 (size=148409)
2024-11-10T13:03:27,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 99786fe932a03a1a763ac98ba88ea15a in 25ms, sequenceid=269, compaction requested=false
2024-11-10T13:03:27,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:27,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-10T13:03:27,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/51c7ebd6b1964ea8bf5263179d9f0fd0 is 1080, key is row0209/info:/1731243807318/Put/seqid=0
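The PressureAwareThroughputController lines report how fast each compaction wrote and confirm it stayed under the 50.00 MB/second ceiling without sleeping. A bare-bones sketch of that style of limiter, which compares bytes written against elapsed time and sleeps off any surplus (class name and numbers are illustrative; the real controller also tunes the limit with store pressure):

public class ThroughputLimiterSketch {
    private final double limitBytesPerSec;
    private long start = System.nanoTime();
    private long bytes;
    private long sleeps, sleptMs;

    ThroughputLimiterSketch(double limitBytesPerSec) { this.limitBytesPerSec = limitBytesPerSec; }

    // Record a chunk of compaction output and sleep just long enough to stay under the limit.
    void control(long chunkBytes) throws InterruptedException {
        bytes += chunkBytes;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double neededSec = bytes / limitBytesPerSec;
        long deficitMs = (long) ((neededSec - elapsedSec) * 1000);
        if (deficitMs > 0) {
            sleeps++; sleptMs += deficitMs;
            Thread.sleep(deficitMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s limit, matching the "total limit is 50.00 MB/second" lines above.
        ThroughputLimiterSketch t = new ThroughputLimiterSketch(50 * 1024 * 1024);
        for (int i = 0; i < 20; i++) t.control(1024 * 1024);   // pretend to write 1 MB chunks
        System.out.printf("slept %d time(s), total slept time %d ms%n", t.sleeps, t.sleptMs);
    }
}

Pushing 20 MB through a tight loop forces the sketch to sleep; the compactions above finished under the limit, hence their "slept 0 time(s)".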
2024-11-10T13:03:27,348 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/62235200d984403fa4a49ca41662ed17 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/62235200d984403fa4a49ca41662ed17
2024-11-10T13:03:27,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741869_1045 (size=19013)
2024-11-10T13:03:27,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741869_1045 (size=19013)
2024-11-10T13:03:27,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/51c7ebd6b1964ea8bf5263179d9f0fd0
2024-11-10T13:03:27,354 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into 62235200d984403fa4a49ca41662ed17(size=144.9 K), total size for store is 165.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-10T13:03:27,354 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:27,354 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243807316; duration=0sec
2024-11-10T13:03:27,354 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-10T13:03:27,354 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info
2024-11-10T13:03:27,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/51c7ebd6b1964ea8bf5263179d9f0fd0 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0
2024-11-10T13:03:27,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0, entries=13, sequenceid=285, filesize=18.6 K
2024-11-10T13:03:27,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for 99786fe932a03a1a763ac98ba88ea15a in 19ms, sequenceid=285, compaction requested=true
2024-11-10T13:03:27,362
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:27,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:03:27,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:27,362 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:03:27,363 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188590 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:03:27,363 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files) 2024-11-10T13:03:27,363 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:27,363 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/62235200d984403fa4a49ca41662ed17, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=184.2 K 2024-11-10T13:03:27,364 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62235200d984403fa4a49ca41662ed17, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731243787009 2024-11-10T13:03:27,364 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a88b1be1fb944f9bd7776b4e1b715ac, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1731243807295 2024-11-10T13:03:27,364 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51c7ebd6b1964ea8bf5263179d9f0fd0, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731243807318 2024-11-10T13:03:27,374 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#85 average throughput is 82.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:27,375 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/5d186fc826614e6daa234154972a2307 is 1080, key is row0062/info:/1731243787009/Put/seqid=0 2024-11-10T13:03:27,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741870_1046 (size=178728) 2024-11-10T13:03:27,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741870_1046 (size=178728) 2024-11-10T13:03:27,383 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/5d186fc826614e6daa234154972a2307 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/5d186fc826614e6daa234154972a2307 2024-11-10T13:03:27,389 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into 5d186fc826614e6daa234154972a2307(size=174.5 K), total size for store is 174.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:03:27,389 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:27,389 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243807362; duration=0sec 2024-11-10T13:03:27,389 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:27,389 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info 2024-11-10T13:03:28,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:28,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:29,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:29,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-10T13:03:29,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:29,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-10T13:03:29,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/fb1ecdd2922944fb8d1ab2108adc1820 is 1080, key is row0222/info:/1731243807344/Put/seqid=0
2024-11-10T13:03:29,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741871_1047 (size=12523)
2024-11-10T13:03:29,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741871_1047 (size=12523)
2024-11-10T13:03:29,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/fb1ecdd2922944fb8d1ab2108adc1820
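The "Committing ... as ..." entries just below show the flushed file being published by renaming it from the store's .tmp directory into the live info directory, so readers of the store never see a partially written file. A small sketch of that write-to-temp-then-rename pattern on a local filesystem (the directory layout and file name are illustrative, and the real move happens on HDFS rather than through java.nio):

import java.nio.file.*;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        // Write the new store file under .tmp first, then publish it with a single rename,
        // so the store directory only ever contains complete files.
        Path store = Files.createTempDirectory("store");
        Path tmpDir = Files.createDirectories(store.resolve(".tmp/info"));
        Path infoDir = Files.createDirectories(store.resolve("info"));

        String fileName = "fb1ecdd-example";   // illustrative name, not the real HFile id
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, new byte[12 * 1024]);          // stand-in for the flushed HFile

        Path committed = infoDir.resolve(fileName);
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Committed " + tmpFile + " as " + committed);
    }
}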
2024-11-10T13:03:29,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/fb1ecdd2922944fb8d1ab2108adc1820 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820
2024-11-10T13:03:29,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820, entries=7, sequenceid=297, filesize=12.2 K
2024-11-10T13:03:29,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 99786fe932a03a1a763ac98ba88ea15a in 24ms, sequenceid=297, compaction requested=false
2024-11-10T13:03:29,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a:
2024-11-10T13:03:29,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42123 {}] regionserver.HRegion(8855): Flush requested on 99786fe932a03a1a763ac98ba88ea15a
2024-11-10T13:03:29,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-11-10T13:03:29,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/f69b3b570c914d04b7bf81175ff96897 is 1080, key is row0229/info:/1731243809356/Put/seqid=0
2024-11-10T13:03:29,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741872_1048 (size=23333)
2024-11-10T13:03:29,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/f69b3b570c914d04b7bf81175ff96897
2024-11-10T13:03:29,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741872_1048 (size=23333)
2024-11-10T13:03:29,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/f69b3b570c914d04b7bf81175ff96897 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897
2024-11-10T13:03:29,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897, entries=17, sequenceid=317, filesize=22.8 K
2024-11-10T13:03:29,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=11.56 KB/11836 for 99786fe932a03a1a763ac98ba88ea15a in 20ms, sequenceid=317, compaction requested=true
2024-11-10T13:03:29,401 DEBUG [MemStoreFlusher.0 {}]
regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:29,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99786fe932a03a1a763ac98ba88ea15a:info, priority=-2147483648, current under compaction store size is 1 2024-11-10T13:03:29,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:29,401 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-10T13:03:29,402 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214584 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-10T13:03:29,402 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1541): 99786fe932a03a1a763ac98ba88ea15a/info is initiating minor compaction (all files) 2024-11-10T13:03:29,402 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 99786fe932a03a1a763ac98ba88ea15a/info in TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:29,402 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/5d186fc826614e6daa234154972a2307, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897] into tmpdir=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp, totalSize=209.6 K 2024-11-10T13:03:29,402 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d186fc826614e6daa234154972a2307, keycount=160, bloomtype=ROW, size=174.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731243787009 2024-11-10T13:03:29,403 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb1ecdd2922944fb8d1ab2108adc1820, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1731243807344 2024-11-10T13:03:29,403 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] compactions.Compactor(225): Compacting f69b3b570c914d04b7bf81175ff96897, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1731243809356 2024-11-10T13:03:29,416 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99786fe932a03a1a763ac98ba88ea15a#info#compaction#88 average throughput is 47.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T13:03:29,417 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/c43eaf72ff1047c4bdd35b291720a8b6 is 1080, key is row0062/info:/1731243787009/Put/seqid=0 2024-11-10T13:03:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741873_1049 (size=204803) 2024-11-10T13:03:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741873_1049 (size=204803) 2024-11-10T13:03:29,425 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/c43eaf72ff1047c4bdd35b291720a8b6 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/c43eaf72ff1047c4bdd35b291720a8b6 2024-11-10T13:03:29,430 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 99786fe932a03a1a763ac98ba88ea15a/info of 99786fe932a03a1a763ac98ba88ea15a into c43eaf72ff1047c4bdd35b291720a8b6(size=200.0 K), total size for store is 200.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T13:03:29,430 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:29,431 INFO [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., storeName=99786fe932a03a1a763ac98ba88ea15a/info, priority=13, startTime=1731243809401; duration=0sec 2024-11-10T13:03:29,431 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-10T13:03:29,431 DEBUG [RS:0;3857ccc89b65:42123-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99786fe932a03a1a763ac98ba88ea15a:info 2024-11-10T13:03:29,661 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-10T13:03:30,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:30,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:31,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:31,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:31,398 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-10T13:03:31,398 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C42123%2C1731243763912.1731243811398 2024-11-10T13:03:31,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,404 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,404 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243764292 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243811398 2024-11-10T13:03:31,405 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33133:33133),(127.0.0.1/127.0.0.1:37721:37721)] 2024-11-10T13:03:31,405 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243764292 is not closed yet, will try archiving it next time 2024-11-10T13:03:31,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741833_1009 (size=315283) 2024-11-10T13:03:31,407 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741833_1009 (size=315283) 2024-11-10T13:03:31,410 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 99786fe932a03a1a763ac98ba88ea15a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-10T13:03:31,414 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/3240f8416b134820a9610d627ff08f68 is 1080, key is row0246/info:/1731243809382/Put/seqid=0 2024-11-10T13:03:31,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741875_1051 (size=16839) 2024-11-10T13:03:31,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741875_1051 (size=16839) 2024-11-10T13:03:31,418 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/3240f8416b134820a9610d627ff08f68 2024-11-10T13:03:31,423 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/.tmp/info/3240f8416b134820a9610d627ff08f68 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/3240f8416b134820a9610d627ff08f68 2024-11-10T13:03:31,428 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/3240f8416b134820a9610d627ff08f68, entries=11, sequenceid=332, filesize=16.4 K 2024-11-10T13:03:31,429 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 99786fe932a03a1a763ac98ba88ea15a in 19ms, sequenceid=332, compaction requested=false 2024-11-10T13:03:31,429 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 99786fe932a03a1a763ac98ba88ea15a: 2024-11-10T13:03:31,429 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-10T13:03:31,433 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/32ceab26944742b892d26369fe8bd8d3 is 193, key is TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a./info:regioninfo/1731243790170/Put/seqid=0 2024-11-10T13:03:31,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741876_1052 (size=6223) 2024-11-10T13:03:31,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741876_1052 (size=6223) 
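The flush and compaction activity recorded above (MemStoreFlusher writing .tmp HFiles, CompactSplit selecting and rewriting them) can also be driven from a client. Below is a minimal sketch using the public HBase Admin API; the class name and connection setup are illustrative, the table name is copied from the log, and a reachable cluster is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper, not part of the test run above.
public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      // Write out the memstore of every region of the table as new HFiles,
      // the client-side counterpart of the MemStoreFlusher activity above.
      admin.flush(table);
      // Request a major compaction so the flushed files are rewritten into one,
      // comparable to the minor compaction the shortCompactions thread ran above.
      admin.majorCompact(table);
    }
  }
}

majorCompact is asynchronous: it returns once the request is queued, and the compaction itself runs on the region server's compaction threads, which is the queueing visible in the CompactSplit lines above.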
2024-11-10T13:03:31,438 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/32ceab26944742b892d26369fe8bd8d3 2024-11-10T13:03:31,443 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/.tmp/info/32ceab26944742b892d26369fe8bd8d3 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/info/32ceab26944742b892d26369fe8bd8d3 2024-11-10T13:03:31,447 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/info/32ceab26944742b892d26369fe8bd8d3, entries=5, sequenceid=21, filesize=6.1 K 2024-11-10T13:03:31,448 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-10T13:03:31,448 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-10T13:03:31,449 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 36a7bbbdc6f9f69fcba5b0d63463cc2e: 2024-11-10T13:03:31,449 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C42123%2C1731243763912.1731243811449 2024-11-10T13:03:31,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,454 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243811398 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243811449 2024-11-10T13:03:31,454 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37721:37721),(127.0.0.1/127.0.0.1:33133:33133)] 2024-11-10T13:03:31,454 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243811398 is not closed yet, will try archiving it next time 2024-11-10T13:03:31,455 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243764292 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs/3857ccc89b65%2C42123%2C1731243763912.1731243764292 2024-11-10T13:03:31,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741874_1050 (size=731) 
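The two "Rolled WAL ... new WAL ..." transitions above are triggered from inside the test, but the same roll can be requested through the Admin API. A hedged sketch, assuming the region server name string copied from the log and an otherwise default client configuration (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper, not part of the test run above.
public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Region server identity as logged: host,port,startcode.
      ServerName rs = ServerName.valueOf("3857ccc89b65,42123,1731243763912");
      // Ask that server to close its current WAL and start a new one; the old file
      // is archived to oldWALs later, as in the WAL-Archive-0 lines above.
      admin.rollWALWriter(rs);
    }
  }
}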
2024-11-10T13:03:31,455 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-10T13:03:31,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741874_1050 (size=731) 2024-11-10T13:03:31,456 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/WALs/3857ccc89b65,42123,1731243763912/3857ccc89b65%2C42123%2C1731243763912.1731243811398 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs/3857ccc89b65%2C42123%2C1731243763912.1731243811398 2024-11-10T13:03:31,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:03:31,556 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:03:31,556 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:31,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:31,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:31,556 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:03:31,556 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:03:31,556 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1615194389, stopped=false 2024-11-10T13:03:31,556 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,44609,1731243763869 2024-11-10T13:03:31,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:31,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:31,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:31,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:31,559 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:03:31,559 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
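The call stack above shows the teardown path of the test: AbstractTestLogRolling.tearDown ends up in HBaseTestingUtil.shutdownMiniCluster, which closes the cluster connection and stops master, region server and the mini DFS. A minimal JUnit 4 sketch of that lifecycle, assuming the hbase-testing-util artifact is on the classpath; the class name is illustrative and startMiniCluster options are left at their defaults.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Hypothetical standalone test, not the AbstractTestLogRolling subclass from the log.
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Starts in-process ZooKeeper, HDFS and HBase daemons for the test.
    util.startMiniCluster();
  }

  @Test
  public void clusterStartsAndStops() {
    // Intentionally empty: the sketch is about the setUp/tearDown lifecycle.
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / "Connection has been closed" sequence above.
    util.shutdownMiniCluster();
  }
}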
2024-11-10T13:03:31,559 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:31,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:31,560 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,42123,1731243763912' ***** 2024-11-10T13:03:31,560 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:03:31,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:31,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:03:31,561 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(3091): Received CLOSE for 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(3091): Received CLOSE for 36a7bbbdc6f9f69fcba5b0d63463cc2e 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,42123,1731243763912 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:42123. 
2024-11-10T13:03:31,561 DEBUG [RS:0;3857ccc89b65:42123 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:31,561 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 99786fe932a03a1a763ac98ba88ea15a, disabling compactions & flushes 2024-11-10T13:03:31,561 DEBUG [RS:0;3857ccc89b65:42123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:31,561 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:31,561 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:31,561 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. after waiting 0 ms 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:03:31,561 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
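While stopping, the server above enumerates the regions it still has to close ("Received CLOSE for ...", "Waiting on 3 regions to close"). Roughly the same set can be observed from a client; a sketch under the assumption that the server is still reachable, with the server name copied from the log and an illustrative class name.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

// Hypothetical helper, not part of the test run above.
public class ListOnlineRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ServerName rs = ServerName.valueOf("3857ccc89b65,42123,1731243763912");
      // Regions currently hosted by that server, comparable to the
      // "Online Regions={...}" entry the server logs while shutting down.
      List<RegionInfo> regions = admin.getRegions(rs);
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}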
2024-11-10T13:03:31,561 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:03:31,562 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-10T13:03:31,562 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1325): Online Regions={99786fe932a03a1a763ac98ba88ea15a=TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a., 1588230740=hbase:meta,,1.1588230740, 36a7bbbdc6f9f69fcba5b0d63463cc2e=TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.} 2024-11-10T13:03:31,562 DEBUG [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 36a7bbbdc6f9f69fcba5b0d63463cc2e, 99786fe932a03a1a763ac98ba88ea15a 2024-11-10T13:03:31,562 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:03:31,562 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:03:31,562 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:03:31,562 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:03:31,562 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:03:31,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-top, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/73370c015ddf462e91f398b67952e588, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023, 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/d465eae96cac4053997f3e87760ce605, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b694824dcff74f53adb2603001bb0d22, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ec6c4920b87c496fa642cacd37048768, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/62235200d984403fa4a49ca41662ed17, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/5d186fc826614e6daa234154972a2307, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897] to archive 2024-11-10T13:03:31,563 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
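The "Moving the files [...] to archive" entry above and the per-file "Archived from ..." entries that follow relocate every compacted-away HFile into the cluster's archive directory instead of deleting it. A small sketch of inspecting that archive afterwards with the plain Hadoop FileSystem API; the archive root is copied from the log, while the class name and the region/info directory layout assumed by the loop are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, not part of the test run above.
public class ListArchivedHFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path archive = new Path("hdfs://localhost:36499/user/jenkins/test-data/"
        + "e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling");
    FileSystem fs = archive.getFileSystem(conf);
    // One subdirectory per region; archived store files sit under <region>/info/.
    for (FileStatus region : fs.listStatus(archive)) {
      for (FileStatus file : fs.listStatus(new Path(region.getPath(), "info"))) {
        System.out.println(file.getPath() + " (" + file.getLen() + " bytes)");
      }
    }
  }
}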
2024-11-10T13:03:31,565 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:31,566 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-5db1403c2ca84c3cb8c1c27e49dbcfa0 2024-11-10T13:03:31,566 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-10T13:03:31,567 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:03:31,567 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:03:31,567 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243811562Running coprocessor pre-close hooks at 1731243811562Disabling compacts and flushes for region at 1731243811562Disabling writes for close at 1731243811562Writing region close event to WAL at 1731243811563 (+1 ms)Running coprocessor post-close hooks at 1731243811567 (+4 ms)Closed at 1731243811567 2024-11-10T13:03:31,567 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:03:31,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-47ccaeb5b6884f12aeaacb38b6376da0 2024-11-10T13:03:31,568 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/73370c015ddf462e91f398b67952e588 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/73370c015ddf462e91f398b67952e588 2024-11-10T13:03:31,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/TestLogRolling-testLogRolling=098c1ce7807e5e33f69801e44af102e7-d231067444d741cbba56d6c9bbee63bb 2024-11-10T13:03:31,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/84e529f3b62c434085e397c897c07023 2024-11-10T13:03:31,572 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/d465eae96cac4053997f3e87760ce605 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/d465eae96cac4053997f3e87760ce605 2024-11-10T13:03:31,573 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b8d7b1204c374ec6887ce056366d5d12 2024-11-10T13:03:31,574 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d to 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ad3a695504f642e6a06ad38b02d0349d 2024-11-10T13:03:31,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b694824dcff74f53adb2603001bb0d22 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/b694824dcff74f53adb2603001bb0d22 2024-11-10T13:03:31,576 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/eddfa523bfdd4496b2078edb0f7c82fd 2024-11-10T13:03:31,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/195f61ac61a04e9ea5d0f816846ff486 2024-11-10T13:03:31,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ec6c4920b87c496fa642cacd37048768 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/ec6c4920b87c496fa642cacd37048768 2024-11-10T13:03:31,578 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/dad4053dd8294c9ba92430e50ec0f432 2024-11-10T13:03:31,579 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/6f30b96f027948cf991f22bfeabbb58a 2024-11-10T13:03:31,580 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/62235200d984403fa4a49ca41662ed17 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/62235200d984403fa4a49ca41662ed17 2024-11-10T13:03:31,581 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/e305920fe202488f9bbabb87423c50d2 2024-11-10T13:03:31,582 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/9a88b1be1fb944f9bd7776b4e1b715ac 2024-11-10T13:03:31,583 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/5d186fc826614e6daa234154972a2307 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/5d186fc826614e6daa234154972a2307 2024-11-10T13:03:31,584 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/51c7ebd6b1964ea8bf5263179d9f0fd0 2024-11-10T13:03:31,585 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/fb1ecdd2922944fb8d1ab2108adc1820 2024-11-10T13:03:31,586 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/info/f69b3b570c914d04b7bf81175ff96897 2024-11-10T13:03:31,587 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3857ccc89b65:44609 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-10T13:03:31,587 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [73370c015ddf462e91f398b67952e588=43081, 84e529f3b62c434085e397c897c07023=12516, d465eae96cac4053997f3e87760ce605=66967, b8d7b1204c374ec6887ce056366d5d12=21156, ad3a695504f642e6a06ad38b02d0349d=16828, b694824dcff74f53adb2603001bb0d22=94096, eddfa523bfdd4496b2078edb0f7c82fd=20078, 195f61ac61a04e9ea5d0f816846ff486=22238, ec6c4920b87c496fa642cacd37048768=118996, dad4053dd8294c9ba92430e50ec0f432=12516, 6f30b96f027948cf991f22bfeabbb58a=20078, 62235200d984403fa4a49ca41662ed17=148409, e305920fe202488f9bbabb87423c50d2=19000, 9a88b1be1fb944f9bd7776b4e1b715ac=21168, 5d186fc826614e6daa234154972a2307=178728, 51c7ebd6b1964ea8bf5263179d9f0fd0=19013, fb1ecdd2922944fb8d1ab2108adc1820=12523, f69b3b570c914d04b7bf81175ff96897=23333] 2024-11-10T13:03:31,590 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/99786fe932a03a1a763ac98ba88ea15a/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-10T13:03:31,591 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 99786fe932a03a1a763ac98ba88ea15a: Waiting for close lock at 1731243811561Running coprocessor pre-close hooks at 1731243811561Disabling compacts and flushes for region at 1731243811561Disabling writes for close at 1731243811561Writing region close event to WAL at 1731243811587 (+26 ms)Running coprocessor post-close hooks at 1731243811590 (+3 ms)Closed at 1731243811591 (+1 ms) 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731243789484.99786fe932a03a1a763ac98ba88ea15a. 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 36a7bbbdc6f9f69fcba5b0d63463cc2e, disabling compactions & flushes 2024-11-10T13:03:31,591 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. after waiting 0 ms 2024-11-10T13:03:31,591 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 
2024-11-10T13:03:31,591 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7->hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/098c1ce7807e5e33f69801e44af102e7/info/92b9d02f320d4ebfa942897275801c45-bottom] to archive 2024-11-10T13:03:31,592 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T13:03:31,593 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7 to hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/archive/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/info/92b9d02f320d4ebfa942897275801c45.098c1ce7807e5e33f69801e44af102e7 2024-11-10T13:03:31,593 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-10T13:03:31,596 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/data/default/TestLogRolling-testLogRolling/36a7bbbdc6f9f69fcba5b0d63463cc2e/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-10T13:03:31,596 INFO [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:31,597 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 36a7bbbdc6f9f69fcba5b0d63463cc2e: Waiting for close lock at 1731243811591Running coprocessor pre-close hooks at 1731243811591Disabling compacts and flushes for region at 1731243811591Disabling writes for close at 1731243811591Writing region close event to WAL at 1731243811593 (+2 ms)Running coprocessor post-close hooks at 1731243811596 (+3 ms)Closed at 1731243811596 2024-11-10T13:03:31,597 DEBUG [RS_CLOSE_REGION-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731243789484.36a7bbbdc6f9f69fcba5b0d63463cc2e. 2024-11-10T13:03:31,762 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,42123,1731243763912; all regions closed. 
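The `reportFileArchival` failure recorded above is logged as retryable: the store files were archived successfully, but the region server's RPC client had already been stopped during shutdown, so the quota report to the master could not be sent. As a rough, hypothetical illustration of that "best-effort now, retry later" pattern (the class, interface, and exception names below are invented for the example and are not HBase APIs):

```java
// Hypothetical sketch only: a best-effort reporter that defers its report when the
// underlying client has already been stopped, mirroring the "This will be retried"
// behaviour in the log above. Not HBase's actual implementation.
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ArchivalReporter {

    /** Thrown by the (hypothetical) transport once it has been shut down. */
    public static class StoppedClientException extends RuntimeException {}

    /** Minimal stand-in for a master-facing RPC stub. */
    public interface MasterRpc {
        void reportFileArchival(List<String> archivedFiles) throws StoppedClientException;
    }

    private final MasterRpc rpc;
    private final Queue<List<String>> pendingReports = new ConcurrentLinkedQueue<>();

    public ArchivalReporter(MasterRpc rpc) {
        this.rpc = rpc;
    }

    /** Try to report immediately; if the client is stopped, queue the report instead of failing the close. */
    public void report(List<String> archivedFiles) {
        try {
            rpc.reportFileArchival(archivedFiles);
        } catch (StoppedClientException e) {
            pendingReports.add(archivedFiles); // remember it for a later retry
        }
    }

    /** Drain queued reports once a working client is available again. */
    public void retryPending(MasterRpc freshRpc) {
        List<String> next;
        while ((next = pendingReports.poll()) != null) {
            freshRpc.reportFileArchival(next);
        }
    }
}
```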
2024-11-10T13:03:31,762 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,763 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,763 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,763 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,763 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741834_1010 (size=8107) 2024-11-10T13:03:31,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741834_1010 (size=8107) 2024-11-10T13:03:31,767 DEBUG [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs 2024-11-10T13:03:31,767 INFO [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C42123%2C1731243763912.meta:.meta(num 1731243764653) 2024-11-10T13:03:31,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,768 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,768 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,768 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741877_1053 (size=780) 2024-11-10T13:03:31,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741877_1053 (size=780) 2024-11-10T13:03:31,771 DEBUG [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/oldWALs 2024-11-10T13:03:31,771 INFO [RS:0;3857ccc89b65:42123 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C42123%2C1731243763912:(num 1731243811449) 2024-11-10T13:03:31,771 DEBUG [RS:0;3857ccc89b65:42123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:31,771 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:03:31,771 INFO [RS:0;3857ccc89b65:42123 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:03:31,772 INFO [RS:0;3857ccc89b65:42123 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:03:31,772 INFO [RS:0;3857ccc89b65:42123 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:03:31,772 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T13:03:31,772 INFO [RS:0;3857ccc89b65:42123 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42123 2024-11-10T13:03:31,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,42123,1731243763912 2024-11-10T13:03:31,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:03:31,774 INFO [RS:0;3857ccc89b65:42123 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:03:31,775 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,42123,1731243763912] 2024-11-10T13:03:31,776 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,42123,1731243763912 already deleted, retry=false 2024-11-10T13:03:31,776 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,42123,1731243763912 expired; onlineServers=0 2024-11-10T13:03:31,776 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,44609,1731243763869' ***** 2024-11-10T13:03:31,776 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:03:31,776 INFO [M:0;3857ccc89b65:44609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:03:31,776 INFO [M:0;3857ccc89b65:44609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:03:31,776 DEBUG [M:0;3857ccc89b65:44609 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:03:31,776 DEBUG [M:0;3857ccc89b65:44609 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:03:31,776 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T13:03:31,776 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243764053 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243764053,5,FailOnTimeoutGroup] 2024-11-10T13:03:31,776 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243764054 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243764054,5,FailOnTimeoutGroup] 2024-11-10T13:03:31,776 INFO [M:0;3857ccc89b65:44609 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:03:31,777 INFO [M:0;3857ccc89b65:44609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:03:31,777 DEBUG [M:0;3857ccc89b65:44609 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:03:31,777 INFO [M:0;3857ccc89b65:44609 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:03:31,777 INFO [M:0;3857ccc89b65:44609 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:03:31,777 ERROR [M:0;3857ccc89b65:44609 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:36499,5,PEWorkerGroup] 2024-11-10T13:03:31,777 INFO [M:0;3857ccc89b65:44609 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:03:31,777 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:03:31,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:03:31,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:31,779 DEBUG [M:0;3857ccc89b65:44609 {}] zookeeper.ZKUtil(347): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:03:31,779 WARN [M:0;3857ccc89b65:44609 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:03:31,779 INFO [M:0;3857ccc89b65:44609 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/.lastflushedseqids 2024-11-10T13:03:31,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741878_1054 (size=228) 2024-11-10T13:03:31,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741878_1054 (size=228) 2024-11-10T13:03:31,785 INFO [M:0;3857ccc89b65:44609 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:03:31,785 INFO [M:0;3857ccc89b65:44609 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, 
NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:03:31,785 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:03:31,785 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:31,785 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:31,785 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:03:31,785 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:31,785 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-10T13:03:31,801 DEBUG [M:0;3857ccc89b65:44609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/40cbf11eaa9d4e5494783d7e2bdf2d7e is 82, key is hbase:meta,,1/info:regioninfo/1731243764677/Put/seqid=0 2024-11-10T13:03:31,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741879_1055 (size=5672) 2024-11-10T13:03:31,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741879_1055 (size=5672) 2024-11-10T13:03:31,806 INFO [M:0;3857ccc89b65:44609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/40cbf11eaa9d4e5494783d7e2bdf2d7e 2024-11-10T13:03:31,825 DEBUG [M:0;3857ccc89b65:44609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/329fd32fc3ef411da2bfa4de999f61aa is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731243765103/Put/seqid=0 2024-11-10T13:03:31,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741880_1056 (size=7090) 2024-11-10T13:03:31,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741880_1056 (size=7090) 2024-11-10T13:03:31,831 INFO [M:0;3857ccc89b65:44609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/329fd32fc3ef411da2bfa4de999f61aa 2024-11-10T13:03:31,835 INFO [M:0;3857ccc89b65:44609 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 329fd32fc3ef411da2bfa4de999f61aa 2024-11-10T13:03:31,849 DEBUG [M:0;3857ccc89b65:44609 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec6975e74f14599adead7a50d8a1fc8 is 69, key is 3857ccc89b65,42123,1731243763912/rs:state/1731243764146/Put/seqid=0 2024-11-10T13:03:31,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741881_1057 (size=5156) 2024-11-10T13:03:31,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741881_1057 (size=5156) 2024-11-10T13:03:31,854 INFO [M:0;3857ccc89b65:44609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec6975e74f14599adead7a50d8a1fc8 2024-11-10T13:03:31,871 DEBUG [M:0;3857ccc89b65:44609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae3fd12cfa654793b89f5d41650f6058 is 52, key is load_balancer_on/state:d/1731243764735/Put/seqid=0 2024-11-10T13:03:31,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:31,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42123-0x10101f9531f0001, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:31,875 INFO [RS:0;3857ccc89b65:42123 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:03:31,875 INFO [RS:0;3857ccc89b65:42123 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,42123,1731243763912; zookeeper connection closed. 
2024-11-10T13:03:31,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741882_1058 (size=5056) 2024-11-10T13:03:31,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d3c88a0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d3c88a0 2024-11-10T13:03:31,876 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:03:31,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741882_1058 (size=5056) 2024-11-10T13:03:31,876 INFO [M:0;3857ccc89b65:44609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae3fd12cfa654793b89f5d41650f6058 2024-11-10T13:03:31,881 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/40cbf11eaa9d4e5494783d7e2bdf2d7e as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/40cbf11eaa9d4e5494783d7e2bdf2d7e 2024-11-10T13:03:31,885 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/40cbf11eaa9d4e5494783d7e2bdf2d7e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-10T13:03:31,886 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/329fd32fc3ef411da2bfa4de999f61aa as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/329fd32fc3ef411da2bfa4de999f61aa 2024-11-10T13:03:31,890 INFO [M:0;3857ccc89b65:44609 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 329fd32fc3ef411da2bfa4de999f61aa 2024-11-10T13:03:31,890 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/329fd32fc3ef411da2bfa4de999f61aa, entries=13, sequenceid=125, filesize=6.9 K 2024-11-10T13:03:31,890 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aec6975e74f14599adead7a50d8a1fc8 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aec6975e74f14599adead7a50d8a1fc8 2024-11-10T13:03:31,894 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aec6975e74f14599adead7a50d8a1fc8, entries=1, sequenceid=125, filesize=5.0 K 2024-11-10T13:03:31,895 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae3fd12cfa654793b89f5d41650f6058 as hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ae3fd12cfa654793b89f5d41650f6058 2024-11-10T13:03:31,899 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36499/user/jenkins/test-data/e537a675-72a8-060c-8774-d5c86d6c73c9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ae3fd12cfa654793b89f5d41650f6058, entries=1, sequenceid=125, filesize=4.9 K 2024-11-10T13:03:31,900 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false 2024-11-10T13:03:31,901 INFO [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:31,901 DEBUG [M:0;3857ccc89b65:44609 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243811785Disabling compacts and flushes for region at 1731243811785Disabling writes for close at 1731243811785Obtaining lock to block concurrent updates at 1731243811785Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243811785Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731243811786 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731243811786Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243811786Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243811801 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243811801Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243811811 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243811825 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243811825Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243811835 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243811849 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243811849Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243811857 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243811871 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243811871Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ea10ba2: reopening flushed file at 1731243811881 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77de5e15: reopening flushed file at 1731243811885 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39914e4f: reopening flushed file at 1731243811890 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ca48035: reopening flushed file at 1731243811894 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=125, compaction requested=false at 1731243811900 (+6 ms)Writing region close event to WAL at 1731243811901 (+1 ms)Closed at 1731243811901 2024-11-10T13:03:31,901 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,902 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,902 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,902 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:31,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44757 is added to blk_1073741830_1006 (size=61320) 2024-11-10T13:03:31,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46573 is added to blk_1073741830_1006 (size=61320) 2024-11-10T13:03:31,904 INFO [M:0;3857ccc89b65:44609 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-10T13:03:31,904 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
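The flush of the master's local store above follows a write-to-temp-then-commit pattern: each column family is flushed to a file under `.tmp/` and then committed by moving it into the store directory (the "Committing .../.tmp/... as .../..." lines). The following is a minimal, hypothetical Java sketch of that pattern using `java.nio.file` purely for illustration; it is not the HBase `HRegionFileSystem` code, and the file name in `main` is just a demo value.

```java
// Hypothetical sketch of the temp-then-commit flush pattern seen in the log above.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpCommitExample {

    /**
     * Write the data to a sibling ".tmp" location first, then rename it to its
     * final name so readers only ever observe complete files.
     */
    static void writeAndCommit(Path finalFile, byte[] data) throws IOException {
        Path tmpDir = finalFile.getParent().resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(finalFile.getFileName());

        Files.write(tmpFile, data);                                   // flush content to the temp location
        Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE); // commit: rename into the store directory
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store");
        Path target = store.resolve("info-example");
        writeAndCommit(target, "demo".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed: " + target + " exists=" + Files.exists(target));
    }
}
```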
2024-11-10T13:03:31,904 INFO [M:0;3857ccc89b65:44609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44609 2024-11-10T13:03:31,905 INFO [M:0;3857ccc89b65:44609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:03:32,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:32,006 INFO [M:0;3857ccc89b65:44609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:03:32,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44609-0x10101f9531f0000, quorum=127.0.0.1:54757, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:32,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42d8f83b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:32,009 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d31ee43{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:32,009 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:32,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ff7780b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:32,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3612be31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:32,012 WARN [BP-422773242-172.17.0.2-1731243763174 heartbeating to localhost/127.0.0.1:36499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:03:32,012 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:03:32,012 WARN [BP-422773242-172.17.0.2-1731243763174 heartbeating to localhost/127.0.0.1:36499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-422773242-172.17.0.2-1731243763174 (Datanode Uuid febf492f-ce3e-4b83-baff-1f10f21665d8) service to localhost/127.0.0.1:36499 2024-11-10T13:03:32,012 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:03:32,012 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data3/current/BP-422773242-172.17.0.2-1731243763174 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:32,012 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data4/current/BP-422773242-172.17.0.2-1731243763174 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:32,013 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:03:32,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@277e18bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:32,015 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ce0132a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:32,015 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:32,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9a438{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:32,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@595aaa8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:32,017 WARN [BP-422773242-172.17.0.2-1731243763174 heartbeating to localhost/127.0.0.1:36499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:03:32,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:03:32,017 WARN [BP-422773242-172.17.0.2-1731243763174 heartbeating to localhost/127.0.0.1:36499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-422773242-172.17.0.2-1731243763174 (Datanode Uuid 07ecba63-805c-4a7d-bbc4-0d5c4212a0b7) service to localhost/127.0.0.1:36499 2024-11-10T13:03:32,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:03:32,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data1/current/BP-422773242-172.17.0.2-1731243763174 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:32,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/cluster_c2aaed9f-bc9f-becc-03cb-379711ed8b04/data/data2/current/BP-422773242-172.17.0.2-1731243763174 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:32,018 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:03:32,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63c68f65{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:03:32,024 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@349a863b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:32,024 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:32,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25570184{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:32,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@188ddc10{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:32,031 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:03:32,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:32,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:32,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:03:32,066 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 207) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36499 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36499 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36499 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36499 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36499 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=47 (was 108), ProcessCount=11 (was 11), AvailableMemoryMB=7797 (was 7834) 2024-11-10T13:03:32,074 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=47, ProcessCount=11, AvailableMemoryMB=7796 2024-11-10T13:03:32,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-10T13:03:32,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.log.dir so I do NOT create it in target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9 2024-11-10T13:03:32,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2129a985-0107-fd4d-1ed0-3e66a6379ba5/hadoop.tmp.dir so I do NOT create it in target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0, deleteOnExit=true 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/test.cache.data in system properties and HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.tmp.dir in system properties and HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir in system properties and HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/mapreduce.cluster.temp.dir in system properties and 
HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-10T13:03:32,075 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:03:32,075 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/nfs.dump.dir in 
system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/java.io.tmpdir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-10T13:03:32,076 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-10T13:03:32,089 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:03:32,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:03:32,148 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:03:32,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:03:32,149 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:03:32,149 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:03:32,150 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:03:32,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3287f588{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:03:32,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa6f74d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:03:32,162 INFO [regionserver/3857ccc89b65:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:03:32,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24d8e529{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/java.io.tmpdir/jetty-localhost-43411-hadoop-hdfs-3_4_1-tests_jar-_-any-2277109834922707659/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:03:32,265 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e07b1d{HTTP/1.1, (http/1.1)}{localhost:43411} 2024-11-10T13:03:32,265 INFO [Time-limited test {}] server.Server(415): Started @285235ms 2024-11-10T13:03:32,278 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-10T13:03:32,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:03:32,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-10T13:03:32,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-10T13:03:32,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-10T13:03:32,329 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:03:32,331 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:03:32,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:03:32,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:03:32,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-10T13:03:32,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bfccd5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:03:32,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4548a64b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:03:32,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1926aa54{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/java.io.tmpdir/jetty-localhost-35909-hadoop-hdfs-3_4_1-tests_jar-_-any-14869780481408880365/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:32,447 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b01e479{HTTP/1.1, (http/1.1)}{localhost:35909} 2024-11-10T13:03:32,447 INFO [Time-limited test {}] server.Server(415): Started @285418ms 2024-11-10T13:03:32,448 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-10T13:03:32,478 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-10T13:03:32,480 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-10T13:03:32,481 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-10T13:03:32,481 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-10T13:03:32,481 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-10T13:03:32,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@352a3917{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,AVAILABLE} 2024-11-10T13:03:32,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9090c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-10T13:03:32,539 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data1/current/BP-616104907-172.17.0.2-1731243812094/current, will proceed with Du for space computation calculation, 2024-11-10T13:03:32,539 WARN [Thread-2469 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data2/current/BP-616104907-172.17.0.2-1731243812094/current, will proceed with Du for space computation calculation, 2024-11-10T13:03:32,555 WARN [Thread-2447 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-10T13:03:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfafbb5de6f5e8a with lease ID 0x96bf2e514773b46a: Processing first storage report for DS-8ece3a19-e8ab-45d4-bbae-5bbca7875072 from datanode DatanodeRegistration(127.0.0.1:34429, datanodeUuid=96a832b6-fea4-4f7e-add4-3b0419bfda04, infoPort=40497, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094) 2024-11-10T13:03:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfafbb5de6f5e8a with lease ID 0x96bf2e514773b46a: from storage DS-8ece3a19-e8ab-45d4-bbae-5bbca7875072 node DatanodeRegistration(127.0.0.1:34429, datanodeUuid=96a832b6-fea4-4f7e-add4-3b0419bfda04, infoPort=40497, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-10T13:03:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfafbb5de6f5e8a with lease ID 0x96bf2e514773b46a: Processing first storage report for DS-e1c92f13-c81c-4997-ac9a-3e9fb63b1cfa from datanode DatanodeRegistration(127.0.0.1:34429, datanodeUuid=96a832b6-fea4-4f7e-add4-3b0419bfda04, infoPort=40497, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094) 2024-11-10T13:03:32,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfafbb5de6f5e8a with lease ID 0x96bf2e514773b46a: from storage DS-e1c92f13-c81c-4997-ac9a-3e9fb63b1cfa node DatanodeRegistration(127.0.0.1:34429, datanodeUuid=96a832b6-fea4-4f7e-add4-3b0419bfda04, infoPort=40497, infoSecurePort=0, ipcPort=35583, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:03:32,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12ac7c6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/java.io.tmpdir/jetty-localhost-41513-hadoop-hdfs-3_4_1-tests_jar-_-any-3351085585218810840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:32,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9268180{HTTP/1.1, (http/1.1)}{localhost:41513} 2024-11-10T13:03:32,599 INFO [Time-limited test {}] server.Server(415): Started @285569ms 2024-11-10T13:03:32,600 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
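The cluster being brought up here matches the StartMiniClusterOption printed at the top of this test (1 master, 1 region server, 2 datanodes, 1 ZooKeeper server). Below is a minimal sketch of what that setup looks like through the hbase-testing-util API, assuming the StartMiniClusterOption builder exposes setters matching the field names in the logged toString; it is illustrative only, not the test's actual code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Roughly the options printed by HBaseTestingUtil(805) above:
        // 1 master, 1 region server, 2 datanodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();

        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(option);   // brings up DFS, ZooKeeper and HBase, as logged above
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();    // tears the whole mini cluster back down
        }
      }
    }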
2024-11-10T13:03:32,689 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data3/current/BP-616104907-172.17.0.2-1731243812094/current, will proceed with Du for space computation calculation, 2024-11-10T13:03:32,689 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data4/current/BP-616104907-172.17.0.2-1731243812094/current, will proceed with Du for space computation calculation, 2024-11-10T13:03:32,705 WARN [Thread-2483 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-10T13:03:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23bf3f6345531ee6 with lease ID 0x96bf2e514773b46b: Processing first storage report for DS-afe9c32e-6fc5-4997-a15a-d49a8a823710 from datanode DatanodeRegistration(127.0.0.1:39817, datanodeUuid=60fd9033-d84f-474e-9c79-5a8dd21a42b2, infoPort=46007, infoSecurePort=0, ipcPort=35375, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094) 2024-11-10T13:03:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23bf3f6345531ee6 with lease ID 0x96bf2e514773b46b: from storage DS-afe9c32e-6fc5-4997-a15a-d49a8a823710 node DatanodeRegistration(127.0.0.1:39817, datanodeUuid=60fd9033-d84f-474e-9c79-5a8dd21a42b2, infoPort=46007, infoSecurePort=0, ipcPort=35375, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:03:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x23bf3f6345531ee6 with lease ID 0x96bf2e514773b46b: Processing first storage report for DS-9d9c978a-c141-4022-a830-608e5e557efa from datanode DatanodeRegistration(127.0.0.1:39817, datanodeUuid=60fd9033-d84f-474e-9c79-5a8dd21a42b2, infoPort=46007, infoSecurePort=0, ipcPort=35375, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094) 2024-11-10T13:03:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23bf3f6345531ee6 with lease ID 0x96bf2e514773b46b: from storage DS-9d9c978a-c141-4022-a830-608e5e557efa node DatanodeRegistration(127.0.0.1:39817, datanodeUuid=60fd9033-d84f-474e-9c79-5a8dd21a42b2, infoPort=46007, infoSecurePort=0, ipcPort=35375, storageInfo=lv=-57;cid=testClusterID;nsid=1704068041;c=1731243812094), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-10T13:03:32,722 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9 2024-11-10T13:03:32,724 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/zookeeper_0, clientPort=55137, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-10T13:03:32,725 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55137 2024-11-10T13:03:32,725 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:03:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741825_1001 (size=7) 2024-11-10T13:03:32,735 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde with version=8 2024-11-10T13:03:32,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44081/user/jenkins/test-data/c736e2d5-660c-d558-be39-13b3a22411cb/hbase-staging 2024-11-10T13:03:32,737 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-10T13:03:32,737 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:03:32,738 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34887 2024-11-10T13:03:32,739 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34887 connecting to ZooKeeper ensemble=127.0.0.1:55137 2024-11-10T13:03:32,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348870x0, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:03:32,745 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34887-0x10101fa12030000 connected 2024-11-10T13:03:32,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:32,766 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde, hbase.cluster.distributed=false 2024-11-10T13:03:32,767 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:03:32,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34887 2024-11-10T13:03:32,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34887 2024-11-10T13:03:32,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34887 2024-11-10T13:03:32,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34887 2024-11-10T13:03:32,770 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34887 2024-11-10T13:03:32,785 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3857ccc89b65:0 server-side Connection retries=45 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-10T13:03:32,785 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-10T13:03:32,786 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37365 2024-11-10T13:03:32,787 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37365 connecting to ZooKeeper ensemble=127.0.0.1:55137 2024-11-10T13:03:32,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373650x0, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-10T13:03:32,792 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37365-0x10101fa12030001 connected 2024-11-10T13:03:32,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:32,793 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-10T13:03:32,793 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-10T13:03:32,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-10T13:03:32,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-10T13:03:32,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37365 2024-11-10T13:03:32,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37365 2024-11-10T13:03:32,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37365 2024-11-10T13:03:32,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37365 2024-11-10T13:03:32,796 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37365 
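The RecoverableZooKeeper and NettyRpcServer lines above show the master (port 34887) and region server (port 37365) registering against the mini ZooKeeper ensemble at 127.0.0.1:55137. The following is a sketch, under the standard HBase client API, of how an external client could be pointed at that same ensemble; the quorum and client port are copied from the log, everything else is illustrative (in a test, the configuration handed out by the testing util would normally be reused instead).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static Connection connect() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as logged by MiniZooKeeperCluster / RecoverableZooKeeper above.
        conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 55137);
        // The RpcExecutor lines above reflect a deliberately small handler pool (handlerCount=3),
        // the kind of tuning usually done via hbase.regionserver.handler.count in test configs.
        return ConnectionFactory.createConnection(conf);
      }
    }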
2024-11-10T13:03:32,807 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3857ccc89b65:34887 2024-11-10T13:03:32,807 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:03:32,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:03:32,809 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-10T13:03:32,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,812 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-10T13:03:32,812 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3857ccc89b65,34887,1731243812737 from backup master directory 2024-11-10T13:03:32,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:03:32,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-10T13:03:32,813 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
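The ZKWatcher/ZKUtil events above trace the active-master handshake: the master registers under /hbase/backup-masters, takes /hbase/master, then deletes its backup entry, producing the NodeCreated, NodeChildrenChanged and NodeDeleted events logged. The sketch below observes the same znodes with the plain Apache ZooKeeper client; the ensemble address and znode paths come from the log, while the 30-second session timeout is an arbitrary illustrative value.

    import java.util.List;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = event ->
            System.out.println("ZK event: " + event.getType() + " on " + event.getPath());

        // Ensemble address as logged above; 30s session timeout is an illustrative choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55137", 30_000, watcher);
        try {
          // Watch the active-master znode and list any backup masters, mirroring the
          // NodeCreated / NodeChildrenChanged events seen in the ZKWatcher lines above.
          zk.exists("/hbase/master", true);
          List<String> backups = zk.getChildren("/hbase/backup-masters", true);
          System.out.println("backup masters: " + backups);
        } finally {
          zk.close();
        }
      }
    }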
2024-11-10T13:03:32,813 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,817 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/hbase.id] with ID: 0efe23ce-3471-4c0e-b5a5-87c9f366bb69 2024-11-10T13:03:32,817 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/.tmp/hbase.id 2024-11-10T13:03:32,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:03:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741826_1002 (size=42) 2024-11-10T13:03:32,823 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/.tmp/hbase.id]:[hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/hbase.id] 2024-11-10T13:03:32,833 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:32,833 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T13:03:32,834 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
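The FSUtils lines above describe the cluster ID being written to a temporary location and then moved to its final hbase.id path. Below is a sketch of that write-to-temp-then-rename pattern using the stock Hadoop FileSystem API; this is not HBase's FSUtils code, and the paths are illustrative stand-ins for the logged ones (only the NameNode address is taken from the log).

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as logged above (hdfs://localhost:42877).
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42877"), conf);

        Path idFile  = new Path("/user/jenkins/test-data/example-root/hbase.id");      // stand-in path
        Path tmpFile = new Path("/user/jenkins/test-data/example-root/.tmp/hbase.id"); // stand-in path

        // Write to a temporary location first, then rename into place, mirroring the
        // "temporary location" / "Move the temporary cluster ID file" steps logged above.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmpFile, idFile)) {
          throw new java.io.IOException("rename failed: " + tmpFile + " -> " + idFile);
        }
      }
    }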
2024-11-10T13:03:32,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:03:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741827_1003 (size=196) 2024-11-10T13:03:32,841 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T13:03:32,842 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T13:03:32,842 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:03:32,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:03:32,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741828_1004 (size=1189) 2024-11-10T13:03:32,849 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store 2024-11-10T13:03:32,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:03:32,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741829_1005 (size=34) 2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:03:32,854 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:32,854 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
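The MasterRegion/HRegion records above spell out the 'master:store' schema (families info, proc, rs and state, with the encodings, bloom filters and block sizes listed). The sketch below shows how the logged 'info' family settings could be expressed through the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; it is illustrative only, not how MasterRegion itself constructs that descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreSchemaSketch {
      public static TableDescriptor build() {
        // 'info' family settings copied from the descriptor logged above:
        // VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL,
        // IN_MEMORY=true, BLOCKSIZE=8KB.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // proc, rs and state would be added the same way with their logged settings.
            .build();
      }
    }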
2024-11-10T13:03:32,854 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243812854Disabling compacts and flushes for region at 1731243812854Disabling writes for close at 1731243812854Writing region close event to WAL at 1731243812854Closed at 1731243812854 2024-11-10T13:03:32,855 WARN [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/.initializing 2024-11-10T13:03:32,855 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/WALs/3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,857 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C34887%2C1731243812737, suffix=, logDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/WALs/3857ccc89b65,34887,1731243812737, archiveDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/oldWALs, maxLogs=10 2024-11-10T13:03:32,857 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C34887%2C1731243812737.1731243812857 2024-11-10T13:03:32,861 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/WALs/3857ccc89b65,34887,1731243812737/3857ccc89b65%2C34887%2C1731243812737.1731243812857 2024-11-10T13:03:32,865 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:46007:46007)] 2024-11-10T13:03:32,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:03:32,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:32,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,866 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-10T13:03:32,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:32,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-10T13:03:32,870 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:03:32,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-10T13:03:32,871 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:03:32,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-10T13:03:32,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T13:03:32,873 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,874 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,874 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,875 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,875 DEBUG [master/3857ccc89b65:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,876 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T13:03:32,877 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-10T13:03:32,879 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:03:32,879 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747919, jitterRate=-0.048972830176353455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T13:03:32,880 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731243812866Initializing all the Stores at 1731243812867 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243812867Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243812867Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243812867Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243812867Cleaning up temporary data from old regions at 1731243812875 (+8 ms)Region opened successfully at 1731243812880 (+5 ms) 2024-11-10T13:03:32,880 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-10T13:03:32,883 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@553bf401, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:03:32,884 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-10T13:03:32,884 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-10T13:03:32,884 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-10T13:03:32,884 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-10T13:03:32,885 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-10T13:03:32,885 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-10T13:03:32,885 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-10T13:03:32,887 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-10T13:03:32,888 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-10T13:03:32,889 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-10T13:03:32,889 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-10T13:03:32,890 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-10T13:03:32,892 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-10T13:03:32,892 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-10T13:03:32,893 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-10T13:03:32,894 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-10T13:03:32,894 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-10T13:03:32,895 DEBUG 
[master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-10T13:03:32,897 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-10T13:03:32,898 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-10T13:03:32,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:32,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:32,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,901 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3857ccc89b65,34887,1731243812737, sessionid=0x10101fa12030000, setting cluster-up flag (Was=false) 2024-11-10T13:03:32,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,908 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-10T13:03:32,909 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:32,917 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-10T13:03:32,918 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3857ccc89b65,34887,1731243812737 2024-11-10T13:03:32,919 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-10T13:03:32,920 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-10T13:03:32,921 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-10T13:03:32,921 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-10T13:03:32,921 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3857ccc89b65,34887,1731243812737 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3857ccc89b65:0, corePoolSize=5, maxPoolSize=5 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3857ccc89b65:0, corePoolSize=10, maxPoolSize=10 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:03:32,922 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3857ccc89b65:0, corePoolSize=1, 
maxPoolSize=1 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731243842925 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-10T13:03:32,925 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-10T13:03:32,925 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:03:32,926 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-10T13:03:32,926 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,927 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T13:03:32,928 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:32,929 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-10T13:03:32,929 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-10T13:03:32,929 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T13:03:32,931 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243812931,5,FailOnTimeoutGroup] 2024-11-10T13:03:32,931 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243812931,5,FailOnTimeoutGroup] 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:32,931 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
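The ChoreService lines above show the master scheduling its periodic cleanup chores: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms and SnapshotCleaner every 1800000 ms, backed by the small DirScanPool cleaner pools. A minimal sketch of that fixed-period scheduling pattern, using a plain java.util.concurrent scheduler with the periods taken from the log (HBase's own ChoreService adds chore naming and lifecycle management on top of this):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreScheduleSketch {
        public static void main(String[] args) {
            // One shared scheduler standing in for the ChoreService (simplified).
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

            // Periods copied from the log lines above, all in milliseconds.
            chores.scheduleAtFixedRate(() -> System.out.println("LogsCleaner pass"),
                    0, 600_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(() -> System.out.println("HFileCleaner pass"),
                    0, 600_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(() -> System.out.println("ReplicationBarrierCleaner pass"),
                    0, 43_200_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(() -> System.out.println("SnapshotCleaner pass"),
                    0, 1_800_000, TimeUnit.MILLISECONDS);
            // Left running for illustration; the real ChoreService is shut down with the master.
        }
    }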
2024-11-10T13:03:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:03:32,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741831_1007 (size=1321) 2024-11-10T13:03:32,937 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-10T13:03:32,938 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde 2024-11-10T13:03:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:03:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741832_1008 (size=32) 2024-11-10T13:03:32,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:32,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:03:32,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:03:32,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:32,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:03:32,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:03:32,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:32,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:03:32,955 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:03:32,955 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:32,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:03:32,957 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:03:32,957 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:32,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:32,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:03:32,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740 2024-11-10T13:03:32,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740 2024-11-10T13:03:32,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:03:32,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:03:32,960 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
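The two FlushLargeStoresPolicy lines (for master:store earlier and hbase:meta here) show the fallback applied when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set: the per-family flush lower bound becomes the region's memstore flush size divided by the number of column families. For master:store that is the logged flushSize of 134217728 bytes over its 4 families (info, proc, rs, state), i.e. 32 MB; the 16.0 M logged for hbase:meta with its 4 families (info, ns, rep_barrier, table) implies a 64 MB flush size, which is an inference since that value is not printed in this section. A small worked sketch of the arithmetic:

    public class FlushLowerBoundSketch {
        // Fallback per the log message: lowerBound = memstoreFlushSize / numberOfColumnFamilies.
        static long lowerBound(long memstoreFlushSize, int families) {
            return memstoreFlushSize / families;
        }

        public static void main(String[] args) {
            // master:store: flushSize=134217728 (128 MB), 4 families -> 33554432, the logged 32.0 M.
            System.out.println(lowerBound(134_217_728L, 4));
            // hbase:meta: a 64 MB flush size (inferred) over 4 families -> 16777216,
            // matching flushSizeLowerBound=16777216 and the logged 16.0 M.
            System.out.println(lowerBound(67_108_864L, 4));
        }
    }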
2024-11-10T13:03:32,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:03:32,963 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T13:03:32,963 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837051, jitterRate=0.06436645984649658}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731243812950Initializing all the Stores at 1731243812950Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243812950Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243812951 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243812951Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243812951Cleaning up temporary data from old regions at 1731243812959 (+8 ms)Region opened successfully at 1731243812964 (+5 ms) 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:03:32,964 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:03:32,964 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:03:32,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243812964Disabling compacts and flushes for region at 1731243812964Disabling writes for close at 1731243812964Writing region close 
event to WAL at 1731243812964Closed at 1731243812964 2024-11-10T13:03:32,965 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:03:32,965 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-10T13:03:32,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-10T13:03:32,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:03:32,967 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-10T13:03:32,997 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(746): ClusterId : 0efe23ce-3471-4c0e-b5a5-87c9f366bb69 2024-11-10T13:03:32,997 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-10T13:03:32,999 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-10T13:03:32,999 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-10T13:03:33,002 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-10T13:03:33,003 DEBUG [RS:0;3857ccc89b65:37365 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78d0a61a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3857ccc89b65/172.17.0.2:0 2024-11-10T13:03:33,015 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3857ccc89b65:37365 2024-11-10T13:03:33,015 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-10T13:03:33,015 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-10T13:03:33,015 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-10T13:03:33,015 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(2659): reportForDuty to master=3857ccc89b65,34887,1731243812737 with port=37365, startcode=1731243812784 2024-11-10T13:03:33,015 DEBUG [RS:0;3857ccc89b65:37365 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-10T13:03:33,018 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59985, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-10T13:03:33,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34887 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34887 {}] master.ServerManager(517): Registering regionserver=3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,020 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde 2024-11-10T13:03:33,020 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42877 2024-11-10T13:03:33,020 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-10T13:03:33,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:03:33,022 DEBUG [RS:0;3857ccc89b65:37365 {}] zookeeper.ZKUtil(111): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,022 WARN [RS:0;3857ccc89b65:37365 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-10T13:03:33,022 INFO [RS:0;3857ccc89b65:37365 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:03:33,022 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,022 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3857ccc89b65,37365,1731243812784] 2024-11-10T13:03:33,025 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-10T13:03:33,026 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-10T13:03:33,027 INFO [RS:0;3857ccc89b65:37365 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T13:03:33,027 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
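The PressureAwareCompactionThroughputController line above reports a compaction throughput band of 50 to 100 MB/second, unlimited off peak, re-tuned every 60000 ms by the CompactionThroughputTuner chore. The sketch below only illustrates the general idea of a pressure-aware band by interpolating between the logged bounds; the interpolation (and the pressure input itself) is an assumption here, not a transcription of the HBase controller:

    public class ThroughputBandSketch {
        // Bounds copied from the log: lower 50 MB/s, higher 100 MB/s (off peak: unlimited).
        static final double LOWER = 50.0 * 1024 * 1024;
        static final double HIGHER = 100.0 * 1024 * 1024;

        // Assumed behaviour: move linearly inside the band as compaction pressure rises from 0 to 1.
        static double allowedBytesPerSecond(double pressure) {
            double p = Math.max(0.0, Math.min(1.0, pressure));
            return LOWER + (HIGHER - LOWER) * p;
        }

        public static void main(String[] args) {
            System.out.printf("pressure 0.0 -> %.0f MB/s%n", allowedBytesPerSecond(0.0) / (1024 * 1024));
            System.out.printf("pressure 0.5 -> %.0f MB/s%n", allowedBytesPerSecond(0.5) / (1024 * 1024));
            System.out.printf("pressure 1.0 -> %.0f MB/s%n", allowedBytesPerSecond(1.0) / (1024 * 1024));
        }
    }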
2024-11-10T13:03:33,027 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-10T13:03:33,027 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3857ccc89b65:0, corePoolSize=2, maxPoolSize=2 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3857ccc89b65:0, corePoolSize=1, maxPoolSize=1 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:03:33,028 DEBUG [RS:0;3857ccc89b65:37365 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3857ccc89b65:0, corePoolSize=3, maxPoolSize=3 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
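The executor.ExecutorService lines above start one named thread pool per event type, each with a fixed corePoolSize/maxPoolSize (1/1 for RS_OPEN_REGION, 2/2 for RS_LOG_REPLAY_OPS, 3/3 for RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS, and so on). A bare-bones sketch of that pattern with java.util.concurrent, using names and sizes from the log; HBase's executor.ExecutorService wrapper layers naming and metrics over pools like these:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedPoolSketch {
        // Build one bounded pool per executor name, mirroring the corePoolSize/maxPoolSize pairs.
        static ThreadPoolExecutor pool(String name, int core, int max) {
            return new ThreadPoolExecutor(
                    core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                    r -> new Thread(r, name + "-worker"));
        }

        public static void main(String[] args) {
            ThreadPoolExecutor openRegion = pool("RS_OPEN_REGION", 1, 1);
            ThreadPoolExecutor logReplay  = pool("RS_LOG_REPLAY_OPS", 2, 2);
            openRegion.execute(() -> System.out.println("open region task"));
            logReplay.execute(() -> System.out.println("log replay task"));
            openRegion.shutdown();
            logReplay.shutdown();
        }
    }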
2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,028 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37365,1731243812784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:03:33,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:33,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:33,046 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-10T13:03:33,046 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,37365,1731243812784-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,046 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,046 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.Replication(171): 3857ccc89b65,37365,1731243812784 started 2024-11-10T13:03:33,059 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
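The two Close-WAL-Writer warnings above come from lease recovery probing WAL files of an earlier mini-cluster run: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed reflectively, so the underlying java.io.IOException: Filesystem closed surfaces wrapped in an InvocationTargetException, which is why the useful information sits in the Caused by section. A self-contained sketch of that wrapping, with a hypothetical stand-in class instead of the real DistributedFileSystem:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCauseSketch {
        // Stand-in for isFileClosed on a client whose filesystem is already closed (hypothetical class).
        public static class ClosedFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new ClosedFs(), "/some/wal");
            } catch (InvocationTargetException e) {
                // The log prints the wrapper first; the interesting part is the cause.
                System.out.println("wrapper: " + e);
                System.out.println("cause:   " + e.getCause());   // java.io.IOException: Filesystem closed
            }
        }
    }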
2024-11-10T13:03:33,059 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1482): Serving as 3857ccc89b65,37365,1731243812784, RpcServer on 3857ccc89b65/172.17.0.2:37365, sessionid=0x10101fa12030001 2024-11-10T13:03:33,060 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-10T13:03:33,060 DEBUG [RS:0;3857ccc89b65:37365 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,060 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,37365,1731243812784' 2024-11-10T13:03:33,060 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-10T13:03:33,060 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3857ccc89b65,37365,1731243812784' 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-10T13:03:33,061 DEBUG [RS:0;3857ccc89b65:37365 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-10T13:03:33,061 INFO [RS:0;3857ccc89b65:37365 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-10T13:03:33,061 INFO [RS:0;3857ccc89b65:37365 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-10T13:03:33,118 WARN [3857ccc89b65:34887 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-10T13:03:33,163 INFO [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37365%2C1731243812784, suffix=, logDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/3857ccc89b65,37365,1731243812784, archiveDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs, maxLogs=32 2024-11-10T13:03:33,164 INFO [RS:0;3857ccc89b65:37365 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37365%2C1731243812784.1731243813163 2024-11-10T13:03:33,169 INFO [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/3857ccc89b65,37365,1731243812784/3857ccc89b65%2C37365%2C1731243812784.1731243813163 2024-11-10T13:03:33,169 DEBUG [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:46007:46007)] 2024-11-10T13:03:33,368 DEBUG [3857ccc89b65:34887 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-10T13:03:33,369 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,370 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,37365,1731243812784, state=OPENING 2024-11-10T13:03:33,371 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-10T13:03:33,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:33,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:33,373 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T13:03:33,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:03:33,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:03:33,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,37365,1731243812784}] 2024-11-10T13:03:33,526 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T13:03:33,528 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34421, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T13:03:33,531 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T13:03:33,531 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:03:33,533 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3857ccc89b65%2C37365%2C1731243812784.meta, suffix=.meta, logDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/3857ccc89b65,37365,1731243812784, archiveDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs, maxLogs=32 2024-11-10T13:03:33,533 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3857ccc89b65%2C37365%2C1731243812784.meta.1731243813533.meta 2024-11-10T13:03:33,538 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/3857ccc89b65,37365,1731243812784/3857ccc89b65%2C37365%2C1731243812784.meta.1731243813533.meta 2024-11-10T13:03:33,544 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46007:46007),(127.0.0.1/127.0.0.1:40497:40497)] 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T13:03:33,549 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
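Both AbstractFSWAL configuration lines report blocksize=256 MB, rollsize=128 MB and maxLogs=32, i.e. the writer is rolled at half an HDFS block (HBase derives the roll size from the block size via hbase.regionserver.logroll.multiplier, 0.5 by default) and flushes are forced once more than 32 un-archived WALs accumulate. A simplified sketch of those two checks using the logged values:

    public class WalRollSketch {
        // Values copied from the log: blocksize=256 MB, rollsize=128 MB, maxLogs=32.
        static final long BLOCK_SIZE = 256L * 1024 * 1024;
        static final double ROLL_MULTIPLIER = 0.5;            // hbase.regionserver.logroll.multiplier
        static final long ROLL_SIZE = (long) (BLOCK_SIZE * ROLL_MULTIPLIER);
        static final int MAX_LOGS = 32;

        // Roll the writer once the current WAL reaches the roll size.
        static boolean shouldRoll(long currentWalBytes) {
            return currentWalBytes >= ROLL_SIZE;
        }

        // Force memstore flushes when too many live WALs pile up, so old WALs can be archived.
        static boolean shouldForceFlush(int liveWalCount) {
            return liveWalCount > MAX_LOGS;
        }

        public static void main(String[] args) {
            System.out.println(shouldRoll(130L * 1024 * 1024));  // true: 130 MB >= 128 MB roll size
            System.out.println(shouldForceFlush(33));            // true: more than maxLogs=32 WALs
        }
    }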
2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T13:03:33,549 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T13:03:33,550 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T13:03:33,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T13:03:33,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:33,551 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:33,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T13:03:33,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T13:03:33,552 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:33,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:33,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T13:03:33,553 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T13:03:33,553 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:33,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T13:03:33,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T13:03:33,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T13:03:33,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T13:03:33,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
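
Every store opened above logs the same CompactionConfiguration defaults: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with jitter 0.5. A rough sketch of the settings behind those numbers, assuming the property names from memory rather than from anything in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: property names are assumptions; the values copy the
// CompactionConfiguration line logged above for region 1588230740.
public class CompactionConfigSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (7 days, in ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter 0.500000
    return conf;
  }
}
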
2024-11-10T13:03:33,555 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T13:03:33,555 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740 2024-11-10T13:03:33,556 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740 2024-11-10T13:03:33,557 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T13:03:33,557 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T13:03:33,558 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-10T13:03:33,559 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T13:03:33,560 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736035, jitterRate=-0.06408385932445526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-10T13:03:33,560 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T13:03:33,560 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731243813549Writing region info on filesystem at 1731243813549Initializing all the Stores at 1731243813550 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243813550Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243813550Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731243813550Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731243813550Cleaning up temporary data from old regions at 1731243813557 (+7 ms)Running coprocessor post-open hooks at 1731243813560 (+3 ms)Region opened successfully at 1731243813560 2024-11-10T13:03:33,561 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731243813525 2024-11-10T13:03:33,563 DEBUG [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T13:03:33,563 INFO [RS_OPEN_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T13:03:33,564 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,564 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3857ccc89b65,37365,1731243812784, state=OPEN 2024-11-10T13:03:33,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:03:33,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T13:03:33,570 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3857ccc89b65,37365,1731243812784 2024-11-10T13:03:33,570 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:03:33,570 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T13:03:33,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T13:03:33,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3857ccc89b65,37365,1731243812784 in 197 msec 2024-11-10T13:03:33,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T13:03:33,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-10T13:03:33,574 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T13:03:33,574 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T13:03:33,575 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:03:33,575 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,37365,1731243812784, seqNum=-1] 2024-11-10T13:03:33,576 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:03:33,577 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50779, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:03:33,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 660 msec 2024-11-10T13:03:33,581 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731243813581, completionTime=-1 2024-11-10T13:03:33,581 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-10T13:03:33,581 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-10T13:03:33,582 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-10T13:03:33,582 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731243873582 2024-11-10T13:03:33,582 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731243933582 2024-11-10T13:03:33,582 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3857ccc89b65:34887, period=300000, unit=MILLISECONDS is enabled. 
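
InitMetaProcedure above announces that it will create the 'default' and 'hbase' namespaces before finishing in 660 msec. As an illustrative sketch only (assuming an already-open client Connection, which is not part of this log), those namespaces can be listed back through the Admin API:

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Hedged sketch: lists the namespaces that InitMetaProcedure reports creating.
public class NamespaceListSketch {
  public static void printNamespaces(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName()); // expected here: "default" and "hbase"
      }
    }
  }
}
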
2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,583 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T13:03:33,584 DEBUG [master/3857ccc89b65:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T13:03:33,586 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.773sec 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T13:03:33,587 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T13:03:33,589 DEBUG [master/3857ccc89b65:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T13:03:33,589 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T13:03:33,589 INFO [master/3857ccc89b65:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3857ccc89b65,34887,1731243812737-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-10T13:03:33,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14628127, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:03:33,597 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3857ccc89b65,34887,-1 for getting cluster id 2024-11-10T13:03:33,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T13:03:33,599 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0efe23ce-3471-4c0e-b5a5-87c9f366bb69' 2024-11-10T13:03:33,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T13:03:33,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0efe23ce-3471-4c0e-b5a5-87c9f366bb69" 2024-11-10T13:03:33,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e7cc10c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:03:33,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3857ccc89b65,34887,-1] 2024-11-10T13:03:33,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T13:03:33,600 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:33,601 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47300, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T13:03:33,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1187e2f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T13:03:33,601 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T13:03:33,602 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3857ccc89b65,37365,1731243812784, seqNum=-1] 2024-11-10T13:03:33,602 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T13:03:33,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45436, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T13:03:33,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3857ccc89b65,34887,1731243812737 2024-11-10T13:03:33,605 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T13:03:33,607 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-10T13:03:33,607 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-10T13:03:33,609 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs, maxLogs=32 2024-11-10T13:03:33,609 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731243813609 2024-11-10T13:03:33,613 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1/test.com%2C8080%2C1.1731243813609 2024-11-10T13:03:33,614 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:46007:46007)] 2024-11-10T13:03:33,615 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731243813615 2024-11-10T13:03:33,619 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,619 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,619 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,619 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,619 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,619 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1/test.com%2C8080%2C1.1731243813609 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1/test.com%2C8080%2C1.1731243813615 2024-11-10T13:03:33,620 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40497:40497),(127.0.0.1/127.0.0.1:46007:46007)] 2024-11-10T13:03:33,620 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1/test.com%2C8080%2C1.1731243813609 is not closed yet, will try archiving it next time 2024-11-10T13:03:33,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:33,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741835_1011 (size=93) 2024-11-10T13:03:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741835_1011 (size=93) 2024-11-10T13:03:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741836_1012 (size=93) 
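
The entries above show the standalone test WAL test.com%2C8080%2C1.1731243813609 being rolled with entries=0, filesize=85 B and then handed to WAL-Archive for oldWALs. The test drives this through its own FSHLog instance; purely as a hedged counterpart (not the code path shown in this log), a client can request the same kind of roll on a live region server via the Admin API:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Hedged sketch: asks one region server to close its current WAL file and
// open a new one, assuming an already-open Connection from elsewhere.
public class WalRollRequestSketch {
  public static void rollWalOn(Connection connection, ServerName serverName) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      admin.rollWALWriter(serverName); // triggers a roll like the one logged above
    }
  }
}
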
2024-11-10T13:03:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741836_1012 (size=93) 2024-11-10T13:03:33,624 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/WALs/test.com,8080,1/test.com%2C8080%2C1.1731243813609 to hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs/test.com%2C8080%2C1.1731243813609 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs 2024-11-10T13:03:34,026 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731243813615) 2024-11-10T13:03:34,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T13:03:34,026 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:34,026 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T13:03:34,026 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=97942090, stopped=false 2024-11-10T13:03:34,027 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3857ccc89b65,34887,1731243812737 2024-11-10T13:03:34,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:34,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T13:03:34,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:34,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:34,028 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:03:34,028 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
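
The call stack above ends in AbstractTestLogRolling.tearDown, which reaches HBaseTestingUtil.shutdownMiniCluster and produces the "Shutting down minicluster" entries that follow. A minimal sketch of a teardown with that shape (not the actual test source):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Hedged sketch of the teardown path named in the call stack above
// (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster).
public class LogRollingTeardownSketch {
  // Assumed to have been created and started earlier with startMiniCluster().
  static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // stops the mini HBase, DFS and ZooKeeper components
  }
}
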
2024-11-10T13:03:34,028 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:34,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:34,029 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:34,029 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3857ccc89b65,37365,1731243812784' ***** 2024-11-10T13:03:34,029 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T13:03:34,029 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(959): stopping server 3857ccc89b65,37365,1731243812784 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:03:34,029 INFO [RS:0;3857ccc89b65:37365 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3857ccc89b65:37365. 2024-11-10T13:03:34,029 DEBUG [RS:0;3857ccc89b65:37365 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T13:03:34,029 DEBUG [RS:0;3857ccc89b65:37365 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:34,030 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T13:03:34,030 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T13:03:34,030 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-10T13:03:34,030 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T13:03:34,030 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T13:03:34,030 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T13:03:34,030 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T13:03:34,030 DEBUG [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T13:03:34,030 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T13:03:34,030 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T13:03:34,030 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T13:03:34,030 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T13:03:34,030 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T13:03:34,030 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-10T13:03:34,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,38535,1731243632054/3857ccc89b65%2C38535%2C1731243632054.1731243632271 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-10T13:03:34,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35903/user/jenkins/test-data/162f4519-c1ab-b2c9-2ef2-cd384f4339de/WALs/3857ccc89b65,44789,1731243631098/3857ccc89b65%2C44789%2C1731243631098.meta.1731243631901.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-10T13:03:34,046 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/.tmp/ns/a16202235c654c6abf067bc9a8b3541b is 43, key is default/ns:d/1731243813577/Put/seqid=0 2024-11-10T13:03:34,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741837_1013 (size=5153) 2024-11-10T13:03:34,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741837_1013 (size=5153) 2024-11-10T13:03:34,051 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/.tmp/ns/a16202235c654c6abf067bc9a8b3541b 2024-11-10T13:03:34,055 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/.tmp/ns/a16202235c654c6abf067bc9a8b3541b as hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/ns/a16202235c654c6abf067bc9a8b3541b 2024-11-10T13:03:34,059 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/ns/a16202235c654c6abf067bc9a8b3541b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-10T13:03:34,060 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-10T13:03:34,060 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T13:03:34,064 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-10T13:03:34,064 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T13:03:34,064 INFO [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T13:03:34,064 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731243814030Running coprocessor pre-close hooks at 1731243814030Disabling compacts and flushes for region at 1731243814030Disabling writes for close at 1731243814030Obtaining lock to block concurrent updates at 1731243814030Preparing flush snapshotting stores in 1588230740 at 1731243814030Finished memstore 
snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731243814031 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731243814031Flushing 1588230740/ns: creating writer at 1731243814032 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731243814046 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731243814046Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b0401bd: reopening flushed file at 1731243814055 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731243814060 (+5 ms)Writing region close event to WAL at 1731243814061 (+1 ms)Running coprocessor post-close hooks at 1731243814064 (+3 ms)Closed at 1731243814064 2024-11-10T13:03:34,064 DEBUG [RS_CLOSE_META-regionserver/3857ccc89b65:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T13:03:34,098 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-10T13:03:34,098 INFO [regionserver/3857ccc89b65:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-10T13:03:34,230 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(976): stopping server 3857ccc89b65,37365,1731243812784; all regions closed. 2024-11-10T13:03:34,231 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,231 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,231 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,231 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,231 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741834_1010 (size=1152) 2024-11-10T13:03:34,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741834_1010 (size=1152) 2024-11-10T13:03:34,235 DEBUG [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs 2024-11-10T13:03:34,235 INFO [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C37365%2C1731243812784.meta:.meta(num 1731243813533) 2024-11-10T13:03:34,236 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,236 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:03:34,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741833_1009 (size=93) 2024-11-10T13:03:34,239 DEBUG [RS:0;3857ccc89b65:37365 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/oldWALs 2024-11-10T13:03:34,239 INFO [RS:0;3857ccc89b65:37365 {}] 
wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3857ccc89b65%2C37365%2C1731243812784:(num 1731243813163) 2024-11-10T13:03:34,239 DEBUG [RS:0;3857ccc89b65:37365 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T13:03:34,239 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T13:03:34,240 INFO [RS:0;3857ccc89b65:37365 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:03:34,240 INFO [RS:0;3857ccc89b65:37365 {}] hbase.ChoreService(370): Chore service for: regionserver/3857ccc89b65:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T13:03:34,240 INFO [RS:0;3857ccc89b65:37365 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:03:34,240 INFO [regionserver/3857ccc89b65:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:03:34,240 INFO [RS:0;3857ccc89b65:37365 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37365 2024-11-10T13:03:34,242 INFO [RS:0;3857ccc89b65:37365 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:03:34,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3857ccc89b65,37365,1731243812784 2024-11-10T13:03:34,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T13:03:34,243 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3857ccc89b65,37365,1731243812784] 2024-11-10T13:03:34,246 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3857ccc89b65,37365,1731243812784 already deleted, retry=false 2024-11-10T13:03:34,247 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3857ccc89b65,37365,1731243812784 expired; onlineServers=0 2024-11-10T13:03:34,247 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3857ccc89b65,34887,1731243812737' ***** 2024-11-10T13:03:34,247 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T13:03:34,247 DEBUG [M:0;3857ccc89b65:34887 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T13:03:34,247 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-10T13:03:34,247 DEBUG [M:0;3857ccc89b65:34887 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-10T13:03:34,247 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243812931 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.large.0-1731243812931,5,FailOnTimeoutGroup] 2024-11-10T13:03:34,247 DEBUG [master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243812931 {}] cleaner.HFileCleaner(306): Exit Thread[master/3857ccc89b65:0:becomeActiveMaster-HFileCleaner.small.0-1731243812931,5,FailOnTimeoutGroup] 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] hbase.ChoreService(370): Chore service for: master/3857ccc89b65:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T13:03:34,247 DEBUG [M:0;3857ccc89b65:34887 {}] master.HMaster(1795): Stopping service threads 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T13:03:34,247 INFO [M:0;3857ccc89b65:34887 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-10T13:03:34,247 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-10T13:03:34,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-10T13:03:34,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T13:03:34,248 DEBUG [M:0;3857ccc89b65:34887 {}] zookeeper.ZKUtil(347): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-10T13:03:34,248 WARN [M:0;3857ccc89b65:34887 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-10T13:03:34,249 INFO [M:0;3857ccc89b65:34887 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/.lastflushedseqids 2024-11-10T13:03:34,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741838_1014 (size=99) 2024-11-10T13:03:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741838_1014 (size=99) 2024-11-10T13:03:34,256 INFO [M:0;3857ccc89b65:34887 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-10T13:03:34,256 INFO [M:0;3857ccc89b65:34887 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-10T13:03:34,256 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-10T13:03:34,256 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:34,256 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:34,256 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-10T13:03:34,256 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-10T13:03:34,257 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-10T13:03:34,272 DEBUG [M:0;3857ccc89b65:34887 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4697408dc47a436a843400ddac86b004 is 82, key is hbase:meta,,1/info:regioninfo/1731243813564/Put/seqid=0 2024-11-10T13:03:34,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741839_1015 (size=5672) 2024-11-10T13:03:34,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741839_1015 (size=5672) 2024-11-10T13:03:34,277 INFO [M:0;3857ccc89b65:34887 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4697408dc47a436a843400ddac86b004 2024-11-10T13:03:34,295 DEBUG [M:0;3857ccc89b65:34887 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adb83e5ba084681a1daafde4602720f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731243813580/Put/seqid=0 2024-11-10T13:03:34,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741840_1016 (size=5275) 2024-11-10T13:03:34,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741840_1016 (size=5275) 2024-11-10T13:03:34,299 INFO [M:0;3857ccc89b65:34887 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adb83e5ba084681a1daafde4602720f 2024-11-10T13:03:34,318 DEBUG [M:0;3857ccc89b65:34887 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b246ddb0e71a47eca8964f3ed65bbe08 is 69, key is 3857ccc89b65,37365,1731243812784/rs:state/1731243813019/Put/seqid=0 2024-11-10T13:03:34,322 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741841_1017 (size=5156) 2024-11-10T13:03:34,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741841_1017 (size=5156) 2024-11-10T13:03:34,322 INFO [M:0;3857ccc89b65:34887 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b246ddb0e71a47eca8964f3ed65bbe08 2024-11-10T13:03:34,340 DEBUG [M:0;3857ccc89b65:34887 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00aca20ca7ae45c49bef9e76f2a8e798 is 52, key is load_balancer_on/state:d/1731243813606/Put/seqid=0 2024-11-10T13:03:34,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:34,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37365-0x10101fa12030001, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:34,344 INFO [RS:0;3857ccc89b65:37365 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:03:34,344 INFO [RS:0;3857ccc89b65:37365 {}] regionserver.HRegionServer(1031): Exiting; stopping=3857ccc89b65,37365,1731243812784; zookeeper connection closed. 2024-11-10T13:03:34,344 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@792abbe6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@792abbe6 2024-11-10T13:03:34,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741842_1018 (size=5056) 2024-11-10T13:03:34,344 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-10T13:03:34,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741842_1018 (size=5056) 2024-11-10T13:03:34,345 INFO [M:0;3857ccc89b65:34887 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00aca20ca7ae45c49bef9e76f2a8e798 2024-11-10T13:03:34,349 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4697408dc47a436a843400ddac86b004 as hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4697408dc47a436a843400ddac86b004 2024-11-10T13:03:34,353 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4697408dc47a436a843400ddac86b004, entries=8, sequenceid=29, filesize=5.5 K 2024-11-10T13:03:34,353 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5adb83e5ba084681a1daafde4602720f as hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5adb83e5ba084681a1daafde4602720f 2024-11-10T13:03:34,357 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5adb83e5ba084681a1daafde4602720f, entries=3, sequenceid=29, filesize=5.2 K 2024-11-10T13:03:34,358 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b246ddb0e71a47eca8964f3ed65bbe08 as hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b246ddb0e71a47eca8964f3ed65bbe08 2024-11-10T13:03:34,361 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b246ddb0e71a47eca8964f3ed65bbe08, entries=1, sequenceid=29, filesize=5.0 K 2024-11-10T13:03:34,362 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/00aca20ca7ae45c49bef9e76f2a8e798 as hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/00aca20ca7ae45c49bef9e76f2a8e798 2024-11-10T13:03:34,365 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42877/user/jenkins/test-data/e47070d8-f3a0-0b5d-6e17-2a850ca70fde/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/00aca20ca7ae45c49bef9e76f2a8e798, entries=1, sequenceid=29, filesize=4.9 K 2024-11-10T13:03:34,366 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-11-10T13:03:34,368 INFO [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-10T13:03:34,368 DEBUG [M:0;3857ccc89b65:34887 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731243814256Disabling compacts and flushes for region at 1731243814256Disabling writes for close at 1731243814256Obtaining lock to block concurrent updates at 1731243814257 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731243814257Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731243814257Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731243814257Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731243814258 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731243814272 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731243814272Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731243814281 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731243814294 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731243814294Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731243814304 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731243814317 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731243814317Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731243814326 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731243814339 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731243814340 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75d0de38: reopening flushed file at 1731243814349 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32c49614: reopening flushed file at 1731243814353 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43f07cb6: reopening flushed file at 1731243814357 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59a599be: reopening flushed file at 1731243814361 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1731243814366 (+5 ms)Writing region close event to WAL at 1731243814368 (+2 ms)Closed at 1731243814368 2024-11-10T13:03:34,368 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,369 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,369 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,369 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T13:03:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39817 is added to blk_1073741830_1006 (size=10311) 2024-11-10T13:03:34,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34429 is added to blk_1073741830_1006 (size=10311) 2024-11-10T13:03:34,371 INFO [M:0;3857ccc89b65:34887 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-10T13:03:34,371 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-10T13:03:34,371 INFO [M:0;3857ccc89b65:34887 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34887 2024-11-10T13:03:34,372 INFO [M:0;3857ccc89b65:34887 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T13:03:34,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:34,474 INFO [M:0;3857ccc89b65:34887 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-10T13:03:34,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34887-0x10101fa12030000, quorum=127.0.0.1:55137, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-10T13:03:34,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12ac7c6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:34,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9268180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:34,477 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:34,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9090c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:34,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@352a3917{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:34,478 WARN [BP-616104907-172.17.0.2-1731243812094 heartbeating to localhost/127.0.0.1:42877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:03:34,478 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:03:34,478 WARN [BP-616104907-172.17.0.2-1731243812094 heartbeating to localhost/127.0.0.1:42877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-616104907-172.17.0.2-1731243812094 (Datanode Uuid 60fd9033-d84f-474e-9c79-5a8dd21a42b2) service to localhost/127.0.0.1:42877 2024-11-10T13:03:34,478 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:03:34,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data3/current/BP-616104907-172.17.0.2-1731243812094 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:34,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data4/current/BP-616104907-172.17.0.2-1731243812094 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:34,479 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:03:34,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1926aa54{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-10T13:03:34,481 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b01e479{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:34,481 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:34,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4548a64b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:34,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bfccd5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:34,483 WARN [BP-616104907-172.17.0.2-1731243812094 heartbeating to localhost/127.0.0.1:42877 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-10T13:03:34,483 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-10T13:03:34,483 WARN [BP-616104907-172.17.0.2-1731243812094 heartbeating to localhost/127.0.0.1:42877 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-616104907-172.17.0.2-1731243812094 (Datanode Uuid 96a832b6-fea4-4f7e-add4-3b0419bfda04) service to localhost/127.0.0.1:42877 2024-11-10T13:03:34,483 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-10T13:03:34,483 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data1/current/BP-616104907-172.17.0.2-1731243812094 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:34,484 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/cluster_acd6cf25-8840-9386-b061-b043eca8f1b0/data/data2/current/BP-616104907-172.17.0.2-1731243812094 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-10T13:03:34,484 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-10T13:03:34,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24d8e529{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-10T13:03:34,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e07b1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-10T13:03:34,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-10T13:03:34,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa6f74d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-10T13:03:34,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3287f588{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3181f261-7f92-56e3-8fc9-effb5d5ef3a9/hadoop.log.dir/,STOPPED} 2024-11-10T13:03:34,496 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-10T13:03:34,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-10T13:03:34,518 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 233) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42877 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42877 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42877 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42877 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42877 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42877 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=47 (was 47), ProcessCount=11 (was 11), AvailableMemoryMB=7783 (was 7796)